diff --git a/.github/workflows/cluster-it.yml b/.github/workflows/cluster-it.yml
new file mode 100644
index 000000000000..323bb3182936
--- /dev/null
+++ b/.github/workflows/cluster-it.yml
@@ -0,0 +1,79 @@
+name: New Cluster IT
+
+on:
+ push:
+ branches:
+ - master
+ paths-ignore:
+ - 'docs/**'
+ pull_request:
+ branches:
+ - master
+ paths-ignore:
+ - 'docs/**'
+  # allow manually running the action:
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+
+jobs:
+ ClusterIT:
+ strategy:
+ fail-fast: false
+ max-parallel: 20
+ matrix:
+ java: [ 8, 11, 17 ]
+ os: [ ubuntu-latest, macos-latest, windows-latest ]
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK ${{ matrix.java }}
+ uses: actions/setup-java@v1
+ with:
+ java-version: ${{ matrix.java }}
+ - name: Cache Maven packages
+ uses: actions/cache@v2
+ with:
+ path: ~/.m2
+ key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
+ restore-keys: ${{ runner.os }}-m2-
+ - name: Check Apache Rat
+ run: mvn -B apache-rat:check -P site -P code-coverage
+ - name: Adjust network dynamic TCP ports range
+ if: ${{ runner.os == 'Windows' }}
+ shell: pwsh
+ run: |
+ netsh int ipv4 set dynamicport tcp start=32768 num=32768
+ netsh int ipv4 set dynamicport udp start=32768 num=32768
+ netsh int ipv6 set dynamicport tcp start=32768 num=32768
+ netsh int ipv6 set dynamicport udp start=32768 num=32768
+ - name: Adjust Linux kernel somaxconn
+ if: ${{ runner.os == 'Linux' }}
+ shell: bash
+ run: sudo sysctl -w net.core.somaxconn=65535
+ - name: Adjust Mac kernel somaxconn
+ if: ${{ runner.os == 'macOS' }}
+ shell: bash
+ run: sudo sysctl -w kern.ipc.somaxconn=65535
+ - name: IT/UT Test
+ shell: bash
+      # we do not compile client-cpp to save time; it is tested in client.yml
+ # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml
+ run: |
+ mvn clean verify \
+ -DskipUTs \
+ -pl integration-test \
+ -am -PClusterIT
+ - name: Upload Artifact
+ if: failure()
+ uses: actions/upload-artifact@v3
+ with:
+ name: cluster-log-java${{ matrix.java }}-${{ runner.os }}
+ path: integration-test/target/cluster-logs
+ retention-days: 1
diff --git a/.github/workflows/cluster.yml b/.github/workflows/cluster.yml
deleted file mode 100644
index e6c3f48ec386..000000000000
--- a/.github/workflows/cluster.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Cluster Test
-
-on:
- push:
- branches:
- - test_cluster
- paths-ignore:
- - 'docs/**'
- pull_request:
- branches:
- - test_cluster
- paths-ignore:
- - 'docs/**'
- # allow manually run the action:
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.ref }}
- cancel-in-progress: true
-
-env:
- MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
-
-jobs:
- unix:
- strategy:
- fail-fast: false
- max-parallel: 20
- matrix:
- java: [ 8 ]
- os: [ ubuntu-latest ]
- runs-on: ${{ matrix.os}}
-
- steps:
- - uses: actions/checkout@v2
- - name: Set up JDK ${{ matrix.java }}
- uses: actions/setup-java@v1
- with:
- java-version: ${{ matrix.java }}
- - name: Cache Maven packages
- uses: actions/cache@v2
- with:
- path: ~/.m2
- key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
- restore-keys: ${{ runner.os }}-m2-
- - name: Check Apache Rat
- run: mvn -B apache-rat:check -P site -P code-coverage
- - name: IT/UT Test
- shell: bash
- # we do not compile client-cpp for saving time, it is tested in client.yml
- # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml
- run: mvn -B clean verify -Dsession.test.skip=true -Diotdb.test.skip=true -Dcluster.test.skip=true -Dtsfile.test.skip=true -pl integration -am -PCluster
diff --git a/.github/workflows/sonar-coveralls.yml b/.github/workflows/sonar-coveralls.yml
index 84b30f0d0b97..8b8450789b47 100644
--- a/.github/workflows/sonar-coveralls.yml
+++ b/.github/workflows/sonar-coveralls.yml
@@ -70,4 +70,4 @@ jobs:
-Dsonar.projectKey=apache_incubator-iotdb \
-Dsonar.host.url=https://sonarcloud.io \
-Dsonar.login=${{ secrets.SONARCLOUD_TOKEN }} \
- -DskipTests -pl '!distribution' -P '!testcontainer' -am
+ -DskipTests -pl '!distribution,!integration-test' -P '!testcontainer' -am
diff --git a/.github/workflows/standalone-it-for-mpp.yml b/.github/workflows/standalone-it-for-mpp.yml
new file mode 100644
index 000000000000..6617292221ff
--- /dev/null
+++ b/.github/workflows/standalone-it-for-mpp.yml
@@ -0,0 +1,81 @@
+name: New Standalone IT
+
+on:
+ push:
+ branches:
+ - master
+ paths-ignore:
+ - 'docs/**'
+ pull_request:
+ branches:
+ - master
+ paths-ignore:
+ - 'docs/**'
+ # allow manually running the action:
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+env:
+ MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+
+jobs:
+ StandaloneMppIT:
+ strategy:
+ fail-fast: false
+ max-parallel: 20
+ matrix:
+ java: [ 8, 11, 17 ]
+ os: [ ubuntu-latest, macos-latest, windows-latest ]
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK ${{ matrix.java }}
+ uses: actions/setup-java@v1
+ with:
+ java-version: ${{ matrix.java }}
+ - name: Cache Maven packages
+ uses: actions/cache@v2
+ with:
+ path: ~/.m2
+ key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
+ restore-keys: ${{ runner.os }}-m2-
+ - name: Check Apache Rat
+ run: mvn -B apache-rat:check -P site -P code-coverage
+ - name: Adjust network dynamic TCP ports range
+ if: ${{ runner.os == 'Windows' }}
+ shell: pwsh
+ run: |
+ netsh int ipv4 set dynamicport tcp start=32768 num=32768
+ netsh int ipv4 set dynamicport udp start=32768 num=32768
+ netsh int ipv6 set dynamicport tcp start=32768 num=32768
+ netsh int ipv6 set dynamicport udp start=32768 num=32768
+ - name: Adjust Linux kernel somaxconn
+ if: ${{ runner.os == 'Linux' }}
+ shell: bash
+ run: sudo sysctl -w net.core.somaxconn=65535
+ - name: Adjust Mac kernel somaxconn
+ if: ${{ runner.os == 'macOS' }}
+ shell: bash
+ run: sudo sysctl -w kern.ipc.somaxconn=65535
+ - name: IT/UT Test
+ shell: bash
+ # we do not compile client-cpp to save time; it is tested in client.yml
+ # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml
+ run: |
+ mvn clean verify \
+ -DskipUTs \
+ -DintegrationTest.forkCount=2 \
+ -pl integration-test \
+ -am -PLocalStandaloneOnMppIT
+ - name: Upload Artifact
+ if: failure()
+ uses: actions/upload-artifact@v3
+ with:
+ name: standalone-log-java${{ matrix.java }}-${{ runner.os }}
+ path: integration-test/target/cluster-logs
+ retention-days: 1
+
diff --git a/.gitignore b/.gitignore
index 68d7afef1865..6b90b2f1f5c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@ tsfile-jdbc/src/main/resources/output/queryRes.csv
*.gz
*.tar.gz
*.tar
+*.tokens
#src/test/resources/logback.xml
### CSV ###
diff --git a/LICENSE b/LICENSE
index 2cabd6a47c68..6c45bf8f5d7c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -237,10 +237,40 @@ License: http://www.apache.org/licenses/LICENSE-2.0
--------------------------------------------------------------------------------
+The following files include code modified from Apache HBase project.
+
+./confignode/src/main/java/org/apache/iotdb/procedure/Procedure.java
+./confignode/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java
+./confignode/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java
+./confignode/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java
+./confignode/src/main/java/org/apache/iotdb/procedure/StoppableThread.java
+
+Copyright: The Apache Software Foundation
+Project page: https://hbase.apache.org
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
The following files include code modified from Eclipse Collections project.
./tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ByteArrayList.java
Copyright: 2021 Goldman Sachs
Project page: https://www.eclipse.org/collections
-License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt
\ No newline at end of file
+License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt
+
+--------------------------------------------------------------------------------
+
+The following files include code modified from Micrometer project.
+
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmClassLoaderMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmCompileMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmGcMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmMemoryMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmThreadMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/logback/LogbackMetrics
+./metrics/interface/src/main/java/org/apache/iotdb/metrics/utils/JvmUtils
+
+Copyright: 2017 VMware
+Project page: https://github.com/micrometer-metrics/micrometer
+License: https://github.com/micrometer-metrics/micrometer/blob/main/LICENSE
\ No newline at end of file
diff --git a/README.md b/README.md
index 0ccc8d0dc834..3b05b9d6deef 100644
--- a/README.md
+++ b/README.md
@@ -175,26 +175,12 @@ and "`antlr/target/generated-sources/antlr4`" need to be added to sources roots
**In IDEA, you just need to right click on the root project name and choose "`Maven->Reload Project`" after
you run `mvn package` successfully.**
-#### Spotless problem
-**NOTE**: IF you are using JDK16+, you have to create a file called `jvm.config`,
-put it under `.mvn/`, before you use `spotless:apply`. The file contains the following content:
-```
---add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED
-```
-
-This is [an issue of Spotless](https://github.com/diffplug/spotless/issues/834),
-Once the issue is fixed, we can remove this file.
-
### Configurations
configuration files are under "conf" folder
- * environment config module (`iotdb-env.bat`, `iotdb-env.sh`),
- * system config module (`iotdb-engine.properties`)
+ * environment config module (`datanode-env.bat`, `datanode-env.sh`),
+ * system config module (`iotdb-datanode.properties`)
* log config module (`logback.xml`).
For more information, please see [Config Manual](https://iotdb.apache.org/UserGuide/Master/Reference/Config-Manual.html).
diff --git a/README_ZH.md b/README_ZH.md
index 9b50c1d13787..de98411dd7e7 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -161,24 +161,11 @@ git checkout vx.x.x
**IDEA的操作方法:在上述maven命令编译好后,右键项目名称,选择"`Maven->Reload project`",即可。**
-#### Spotless问题(JDK16+)
-**NOTE**: 如果你在使用 JDK16+, 并且要做`spotless:apply`或者`spotless:check`,
-那么需要在`.mvn/`文件夹下创建一个文件 `jvm.config`, 内容如下:
-```
---add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED
---add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED
-```
-这是spotless依赖的googlecodeformat的 [问题](https://github.com/diffplug/spotless/issues/834),
-近期可能会被官方解决。
-
### 配置
配置文件在"conf"文件夹下
-* 环境配置模块(`iotdb-env.bat`, `iotdb-env.sh`),
-* 系统配置模块(`iotdb-engine.properties`)
+* 环境配置模块(`datanode-env.bat`, `datanode-env.sh`),
+* 系统配置模块(`iotdb-datanode.properties`)
* 日志配置模块(`logback.xml`)。
有关详细信息,请参见[配置参数](https://iotdb.apache.org/zh/UserGuide/Master/Reference/Config-Manual.html)。
diff --git a/antlr/pom.xml b/antlr/pom.xml
index 64015796385e..a66513db4c64 100644
--- a/antlr/pom.xml
+++ b/antlr/pom.xml
@@ -47,6 +47,7 @@
falsetrue
+ src/main/antlr4/org/apache/iotdb/db/qp/sqlantlr4
diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4
new file mode 100644
index 000000000000..4387f7c22565
--- /dev/null
+++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+parser grammar IdentifierParser;
+
+options { tokenVocab=SqlLexer; }
+
+identifier
+ : keyWords
+ | ID
+ | QUOTED_ID
+ ;
+
+
+// List of keywords. New keywords that can be used as identifiers should be added into this list. For example, 'not' is a keyword that can NOT be used as an identifier in node names, so it is excluded from this list.
+
+keyWords
+ : ADD
+ | AFTER
+ | ALIAS
+ | ALIGN
+ | ALIGNED
+ | ALL
+ | ALTER
+ | ANY
+ | APPEND
+ | AS
+ | ASC
+ | ATTRIBUTES
+ | AUTOREGISTER
+ | BEFORE
+ | BEGIN
+ | BOUNDARY
+ | BY
+ | CACHE
+ | CHILD
+ | CLEAR
+ | CLUSTER
+ | CONCAT
+ | CONFIGURATION
+ | CONTINUOUS
+ | COUNT
+ | CONTAIN
+ | CQ
+ | CQS
+ | CREATE
+ | DATA
+ | DEBUG
+ | DELETE
+ | DESC
+ | DESCRIBE
+ | DEVICE
+ | DEVICES
+ | DISABLE
+ | DROP
+ | END
+ | EVERY
+ | EXPLAIN
+ | FILL
+ | FLUSH
+ | FOR
+ | FROM
+ | FULL
+ | FUNCTION
+ | FUNCTIONS
+ | GLOBAL
+ | GRANT
+ | GROUP
+ | INDEX
+ | INFO
+ | INSERT
+ | INTO
+ | KILL
+ | LABEL
+ | LAST
+ | LATEST
+ | LEVEL
+ | LIKE
+ | LIMIT
+ | LINEAR
+ | LINK
+ | LIST
+ | LOAD
+ | LOCAL
+ | LOCK
+ | MERGE
+ | METADATA
+ | NODES
+ | NOW
+ | OF
+ | OFF
+ | OFFSET
+ | ON
+ | ORDER
+ | PARTITION
+ | PASSWORD
+ | PATHS
+ | PIPE
+ | PIPES
+ | PIPESERVER
+ | PIPESINK
+ | PIPESINKS
+ | PIPESINKTYPE
+ | PREVIOUS
+ | PREVIOUSUNTILLAST
+ | PRIVILEGES
+ | PROCESSLIST
+ | PROPERTY
+ | PRUNE
+ | QUERIES
+ | QUERY
+ | READONLY
+ | REGEXP
+ | REGIONS
+ | REMOVE
+ | RENAME
+ | RESAMPLE
+ | RESOURCE
+ | REVOKE
+ | ROLE
+ | SCHEMA
+ | SELECT
+ | SET
+ | SETTLE
+ | SGLEVEL
+ | SHOW
+ | SLIMIT
+ | SOFFSET
+ | STORAGE
+ | START
+ | STOP
+ | SYSTEM
+ | TAGS
+ | TASK
+ | TEMPLATE
+ | TEMPLATES
+ | TIMESERIES
+ | TO
+ | TOLERANCE
+ | TOP
+ | TRACING
+ | TRIGGER
+ | TRIGGERS
+ | TTL
+ | UNLINK
+ | UNLOAD
+ | UNSET
+ | UPDATE
+ | UPSERT
+ | USER
+ | USING
+ | VALUES
+ | VERIFY
+ | VERSION
+ | WHERE
+ | WITH
+ | WITHOUT
+ | WRITABLE
+ | PRIVILEGE_VALUE
+ ;
\ No newline at end of file
diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4
index 0ffdcb9f2288..381f3e297977 100644
--- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4
+++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4
@@ -21,6 +21,8 @@ parser grammar InfluxDBSqlParser;
options { tokenVocab=SqlLexer; }
+import IdentifierParser;
+
singleStatement
: statement SEMI? EOF
;
@@ -71,20 +73,12 @@ fromClause
nodeName
: STAR
- | ID
- | QUOTED_ID
+ | identifier
| LAST
| COUNT
| DEVICE
;
-// Identifier
-
-identifier
- : ID
- | QUOTED_ID
- ;
-
// Constant & Literal
@@ -126,4 +120,4 @@ realLiteral
datetimeLiteral
: DATETIME_LITERAL
| NOW LR_BRACKET RR_BRACKET
- ;
+ ;
\ No newline at end of file
diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4
index 56ca7bd9b576..1dbb03f7cecf 100644
--- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4
+++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4
@@ -21,6 +21,7 @@ parser grammar IoTDBSqlParser;
options { tokenVocab=SqlLexer; }
+import IdentifierParser;
/**
* 1. Top Level Description
@@ -42,7 +43,7 @@ ddlStatement
| dropFunction | dropTrigger | dropContinuousQuery | dropSchemaTemplate
| setTTL | unsetTTL | startTrigger | stopTrigger | setSchemaTemplate | unsetSchemaTemplate
| showStorageGroup | showDevices | showTimeseries | showChildPaths | showChildNodes
- | showFunctions | showTriggers | showContinuousQueries | showTTL | showAllTTL
+ | showFunctions | showTriggers | showContinuousQueries | showTTL | showAllTTL | showCluster | showRegion
| showSchemaTemplates | showNodesInSchemaTemplate
| showPathsUsingSchemaTemplate | showPathsSetSchemaTemplate
| countStorageGroup | countDevices | countTimeseries | countNodes
@@ -59,7 +60,7 @@ dclStatement
;
utilityStatement
- : merge | fullMerge | flush | clearCache | settle
+ : merge | fullMerge | flush | clearCache | settle | explain
| setSystemStatus | showVersion | showFlushInfo | showLockInfo | showQueryResource
| showQueryProcesslist | killQuery | grantWatermarkEmbedding | revokeWatermarkEmbedding
| loadConfiguration | loadTimeseries | loadFile | removeFile | unloadFile;
@@ -75,15 +76,19 @@ syncStatement
// Create Storage Group
setStorageGroup
- : SET STORAGE GROUP TO prefixPath (WITH storageGroupAttributeClause (COMMA storageGroupAttributeClause)*)?
+ : SET STORAGE GROUP TO prefixPath storageGroupAttributesClause?
;
-storageGroupAttributeClause
- : (TTL | SCHEMA_REPLICATION_FACTOR | DATA_REPLICATION_FACTOR | TIME_PARTITION_INTERVAL) '=' INTEGER_LITERAL
+createStorageGroup
+ : CREATE STORAGE GROUP prefixPath storageGroupAttributesClause?
;
-createStorageGroup
- : CREATE STORAGE GROUP prefixPath
+storageGroupAttributesClause
+ : WITH storageGroupAttributeClause (COMMA storageGroupAttributeClause)*
+ ;
+
+storageGroupAttributeClause
+ : (TTL | SCHEMA_REPLICATION_FACTOR | DATA_REPLICATION_FACTOR | TIME_PARTITION_INTERVAL) '=' INTEGER_LITERAL
;
// Create Timeseries
@@ -114,7 +119,11 @@ createTimeseriesOfSchemaTemplate
// Create Function
createFunction
- : CREATE FUNCTION udfName=identifier AS className=STRING_LITERAL
+ : CREATE FUNCTION udfName=identifier AS className=STRING_LITERAL (USING uri (COMMA uri)*)?
+ ;
+
+uri
+ : STRING_LITERAL
;
// Create Trigger
@@ -160,7 +169,7 @@ alterTimeseries
alterClause
: RENAME beforeName=attributeKey TO currentName=attributeKey
| SET attributePair (COMMA attributePair)*
- | DROP STRING_LITERAL (COMMA STRING_LITERAL)*
+ | DROP attributeKey (COMMA attributeKey)*
| ADD TAGS attributePair (COMMA attributePair)*
| ADD ATTRIBUTES attributePair (COMMA attributePair)*
| UPSERT aliasClause? tagClause? attributeClause?
@@ -294,6 +303,16 @@ showAllTTL
: SHOW ALL TTL
;
+// Show Cluster
+showCluster
+ : SHOW CLUSTER
+ ;
+
+// Show Region
+showRegion
+ : SHOW (SCHEMA | DATA)? REGIONS
+ ;
+
// Show Schema Template
showSchemaTemplates
: SHOW SCHEMA? TEMPLATES
@@ -411,10 +430,10 @@ withoutNullClause
;
oldTypeClause
- : (dataType=DATATYPE_VALUE | ALL) LS_BRACKET linearClause RS_BRACKET
- | (dataType=DATATYPE_VALUE | ALL) LS_BRACKET previousClause RS_BRACKET
- | (dataType=DATATYPE_VALUE | ALL) LS_BRACKET specificValueClause RS_BRACKET
- | (dataType=DATATYPE_VALUE | ALL) LS_BRACKET previousUntilLastClause RS_BRACKET
+ : (ALL | dataType=attributeValue) LS_BRACKET linearClause RS_BRACKET
+ | (ALL | dataType=attributeValue) LS_BRACKET previousClause RS_BRACKET
+ | (ALL | dataType=attributeValue) LS_BRACKET specificValueClause RS_BRACKET
+ | (ALL | dataType=attributeValue) LS_BRACKET previousUntilLastClause RS_BRACKET
;
linearClause
@@ -491,7 +510,7 @@ alterUser
// Grant User Privileges
grantUser
- : GRANT USER userName=identifier PRIVILEGES privileges ON prefixPath
+ : GRANT USER userName=identifier PRIVILEGES privileges (ON prefixPath)?
;
// Grant Role Privileges
@@ -506,7 +525,7 @@ grantRoleToUser
// Revoke User Privileges
revokeUser
- : REVOKE USER userName=identifier PRIVILEGES privileges ON prefixPath
+ : REVOKE USER userName=identifier PRIVILEGES privileges (ON prefixPath)?
;
// Revoke Role Privileges
@@ -600,7 +619,7 @@ fullMerge
// Flush
flush
- : FLUSH prefixPath? (COMMA prefixPath)* BOOLEAN_LITERAL?
+ : FLUSH prefixPath? (COMMA prefixPath)* BOOLEAN_LITERAL? (ON (LOCAL | CLUSTER))?
;
// Clear Cache
@@ -613,6 +632,11 @@ settle
: SETTLE (prefixPath|tsFilePath=STRING_LITERAL)
;
+// Explain
+explain
+ : EXPLAIN selectStatement
+ ;
+
// Set System To ReadOnly/Writable
setSystemStatus
: SET SYSTEM TO (READONLY|WRITABLE)
@@ -789,14 +813,6 @@ wildcard
;
-// Identifier
-
-identifier
- : ID
- | QUOTED_ID
- ;
-
-
// Constant & Literal
constant
@@ -845,6 +861,7 @@ expression
| leftExpression=expression (PLUS | MINUS) rightExpression=expression
| leftExpression=expression (OPERATOR_GT | OPERATOR_GTE | OPERATOR_LT | OPERATOR_LTE | OPERATOR_SEQ | OPERATOR_DEQ | OPERATOR_NEQ) rightExpression=expression
| unaryBeforeRegularOrLikeExpression=expression (REGEXP | LIKE) STRING_LITERAL
+ | unaryBeforeIsNullExpression=expression OPERATOR_IS OPERATOR_NOT? NULL_LITERAL
| unaryBeforeInExpression=expression OPERATOR_NOT? (OPERATOR_IN | OPERATOR_CONTAINS) LR_BRACKET constant (COMMA constant)* RR_BRACKET
| leftExpression=expression OPERATOR_AND rightExpression=expression
| leftExpression=expression OPERATOR_OR rightExpression=expression
@@ -886,16 +903,12 @@ fromClause
// Attribute Clause
attributeClauses
- : aliasNodeName? WITH DATATYPE operator_eq dataType=DATATYPE_VALUE
- (COMMA ENCODING operator_eq encoding=ENCODING_VALUE)?
- (COMMA (COMPRESSOR | COMPRESSION) operator_eq compressor=COMPRESSOR_VALUE)?
+ : aliasNodeName? WITH attributeKey operator_eq dataType=attributeValue
(COMMA attributePair)*
tagClause?
attributeClause?
// Simplified version (supported since v0.13)
- | aliasNodeName? WITH? (DATATYPE operator_eq)? dataType=DATATYPE_VALUE
- (ENCODING operator_eq encoding=ENCODING_VALUE)?
- ((COMPRESSOR | COMPRESSION) operator_eq compressor=COMPRESSOR_VALUE)?
+ | aliasNodeName? WITH? (attributeKey operator_eq)? dataType=attributeValue
attributePair*
tagClause?
attributeClause?
@@ -914,7 +927,7 @@ attributeClause
;
attributePair
- : key=attributeKey (OPERATOR_SEQ | OPERATOR_DEQ) value=attributeValue
+ : key=attributeKey operator_eq value=attributeValue
;
attributeKey
diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/PathParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/PathParser.g4
new file mode 100644
index 000000000000..546be58b6eee
--- /dev/null
+++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/PathParser.g4
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+parser grammar PathParser;
+
+options { tokenVocab=SqlLexer; }
+
+import IdentifierParser;
+
+/**
+ * PartialPath and Path used by Session API and TsFile API should be parsed by Antlr4.
+ */
+
+path
+ : prefixPath EOF
+ | suffixPath EOF
+ ;
+
+prefixPath
+ : ROOT (DOT nodeName)*
+ ;
+
+suffixPath
+ : nodeName (DOT nodeName)*
+ ;
+
+nodeName
+ : wildcard
+ | wildcard? identifier wildcard?
+ | identifier
+ ;
+
+wildcard
+ : STAR
+ | DOUBLE_STAR
+ ;
diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4
index f92fcbba5b4f..7e9b769e880e 100644
--- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4
+++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4
@@ -32,7 +32,7 @@ WS
/**
- * 2. Keywords
+ * 2. Keywords, new keywords should be added into IdentifierParser.g4
*/
// Common Keywords
@@ -117,12 +117,8 @@ CLEAR
: C L E A R
;
-COMPRESSION
- : C O M P R E S S I O N
- ;
-
-COMPRESSOR
- : C O M P R E S S O R
+CLUSTER
+ : C L U S T E R
;
CONCAT
@@ -157,8 +153,8 @@ CREATE
: C R E A T E
;
-DATATYPE
- : D A T A T Y P E
+DATA
+ : D A T A
;
DEBUG
@@ -193,10 +189,6 @@ DROP
: D R O P
;
-ENCODING
- : E N C O D I N G
- ;
-
END
: E N D
;
@@ -310,6 +302,10 @@ LOAD
: L O A D
;
+LOCAL
+ : L O C A L
+ ;
+
LOCK
: L O C K
;
@@ -426,6 +422,10 @@ REGEXP
: R E G E X P
;
+REGIONS
+ : R E G I O N S
+ ;
+
REMOVE
: R E M O V E
;
@@ -619,103 +619,6 @@ WRITABLE
;
-// Data Type Keywords
-
-DATATYPE_VALUE
- : BOOLEAN | DOUBLE | FLOAT | INT32 | INT64 | TEXT
- ;
-
-BOOLEAN
- : B O O L E A N
- ;
-
-DOUBLE
- : D O U B L E
- ;
-
-FLOAT
- : F L O A T
- ;
-
-INT32
- : I N T '3' '2'
- ;
-
-INT64
- : I N T '6' '4'
- ;
-
-TEXT
- : T E X T
- ;
-
-
-// Encoding Type Keywords
-
-ENCODING_VALUE
- : DICTIONARY | DIFF | GORILLA | PLAIN | REGULAR | RLE | TS_2DIFF | ZIGZAG | FREQ
- ;
-
-DICTIONARY
- : D I C T I O N A R Y
- ;
-
-DIFF
- : D I F F
- ;
-
-GORILLA
- : G O R I L L A
- ;
-
-PLAIN
- : P L A I N
- ;
-
-REGULAR
- : R E G U L A R
- ;
-
-RLE
- : R L E
- ;
-
-TS_2DIFF
- : T S '_' '2' D I F F
- ;
-
-ZIGZAG
- : Z I G Z A G
- ;
-
-FREQ
- : F R E Q
- ;
-
-
-// Compressor Type Keywords
-
-COMPRESSOR_VALUE
- : GZIP | LZ4 | SNAPPY | UNCOMPRESSED
- ;
-
-GZIP
- : G Z I P
- ;
-
-LZ4
- : L Z '4'
- ;
-
-SNAPPY
- : S N A P P Y
- ;
-
-UNCOMPRESSED
- : U N C O M P R E S S E D
- ;
-
-
// Privileges Keywords
PRIVILEGE_VALUE
@@ -870,6 +773,8 @@ OPERATOR_LT : '<';
OPERATOR_LTE : '<=';
OPERATOR_NEQ : '!=' | '<>';
+OPERATOR_IS : I S;
+
OPERATOR_IN : I N;
OPERATOR_AND
@@ -973,9 +878,8 @@ NAN_LITERAL
: N A N
;
-
/**
- * 6. Identifier
+ * 6. ID
*/
ID
@@ -1005,15 +909,15 @@ fragment CN_CHAR
;
fragment DQUOTA_STRING
- : '"' ( '\\'. | '""' | ~('"'| '\\') )* '"'
+ : '"' ( '\\'. | '""' | ~('"') )* '"'
;
fragment SQUOTA_STRING
- : '\'' ( '\\'. | '\'\'' |~('\''| '\\') )* '\''
+ : '\'' ( '\\'. | '\'\'' |~('\'') )* '\''
;
fragment BQUOTA_STRING
- : '`' ( '\\' ~('`') | '``' | ~('`'| '\\') )* '`'
+ : '`' ( '\\' ~('`') | '``' | ~('`') )* '`'
;
diff --git a/checkstyle.xml b/checkstyle.xml
index 4d4eb175a92d..f74e443f9e7c 100644
--- a/checkstyle.xml
+++ b/checkstyle.xml
@@ -40,7 +40,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -52,13 +70,6 @@
-
-
-
-
-
-
-
@@ -212,13 +223,10 @@
-
-
+
-
-
diff --git a/cli/src/assembly/resources/sbin/start-cli.bat b/cli/src/assembly/resources/sbin/start-cli.bat
index 21bb4000c522..cbd375e6f2b7 100644
--- a/cli/src/assembly/resources/sbin/start-cli.bat
+++ b/cli/src/assembly/resources/sbin/start-cli.bat
@@ -37,7 +37,7 @@ set JAVA_OPTS=-ea^
-DIOTDB_HOME="%IOTDB_HOME%"
REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable.
-set CLASSPATH="%IOTDB_HOME%\lib\*"
+if EXIST %IOTDB_HOME%\lib (set CLASSPATH="%IOTDB_HOME%\lib\*") else set CLASSPATH="%IOTDB_HOME%\..\lib\*"
REM -----------------------------------------------------------------------------
set PARAMETERS=%*
diff --git a/cli/src/assembly/resources/sbin/start-cli.sh b/cli/src/assembly/resources/sbin/start-cli.sh
index 20fb4506a027..dbeedc725059 100644
--- a/cli/src/assembly/resources/sbin/start-cli.sh
+++ b/cli/src/assembly/resources/sbin/start-cli.sh
@@ -30,8 +30,14 @@ IOTDB_CLI_CONF=${IOTDB_HOME}/conf
MAIN_CLASS=org.apache.iotdb.cli.Cli
+if [ -d ${IOTDB_HOME}/lib ]; then
+LIB_PATH=${IOTDB_HOME}/lib
+else
+LIB_PATH=${IOTDB_HOME}/../lib
+fi
+
CLASSPATH=""
-for f in ${IOTDB_HOME}/lib/*.jar; do
+for f in ${LIB_PATH}/*.jar; do
CLASSPATH=${CLASSPATH}":"$f
done
diff --git a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
index 14374603173a..ae0b97f31890 100644
--- a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
+++ b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java
@@ -714,8 +714,7 @@ private static boolean queryType(
try {
sessionDataSet = session.executeQueryStatement(sql);
} catch (StatementExecutionException e) {
- System.out.println(
- "Meet error when query the type of timeseries because the IoTDB v0.13 don't support that the path contains any purely digital path.");
+ System.out.println("Meet error when query the type of timeseries because " + e.getMessage());
return false;
}
List columnNames = sessionDataSet.getColumnNames();
diff --git a/client-py/README.md b/client-py/README.md
index 41c0a113b8f1..beef55e3513b 100644
--- a/client-py/README.md
+++ b/client-py/README.md
@@ -273,6 +273,98 @@ session.execute_query_statement(sql)
session.execute_non_query_statement(sql)
```
+* Execute statement
+
+```python
+session.execute_statement(sql)
+```
+
+### Schema Template
+#### Create Schema Template
+The steps for creating a schema template are as follows:
+1. Create the template class
+2. Add child nodes; InternalNode and MeasurementNode can be chosen
+3. Execute the create schema template function
+
+```python
+template = Template(name=template_name, share_time=True)
+
+i_node_gps = InternalNode(name="GPS", share_time=False)
+i_node_v = InternalNode(name="vehicle", share_time=True)
+m_node_x = MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY)
+
+i_node_gps.add_child(m_node_x)
+i_node_v.add_child(m_node_x)
+
+template.add_template(i_node_gps)
+template.add_template(i_node_v)
+template.add_template(m_node_x)
+
+session.create_schema_template(template)
+```
+#### Modify Schema Template nodes
+Modify nodes in a template; the template must already be created. These functions add or delete measurement nodes.
+* add node in template
+```python
+session.add_measurements_in_template(template_name, measurements_path, data_types, encodings, compressors, is_aligned)
+```
+
+* delete node in template
+```python
+session.delete_node_in_template(template_name, path)
+```
+
+#### Set Schema Template
+```python
+session.set_schema_template(template_name, prefix_path)
+```
+
+#### Unset Schema Template
+```python
+session.unset_schema_template(template_name, prefix_path)
+```
+
+#### Show Schema Template
+* Show all schema templates
+```python
+session.show_all_templates()
+```
+* Count all nodes in templates
+```python
+session.count_measurements_in_template(template_name)
+```
+
+* Judge whether the given path is a measurement in the template; the measurement must exist in the template
+```python
+session.count_measurements_in_template(template_name, path)
+```
+
+* Judge whether the path exists in the template; the path does not have to belong to the template
+```python
+session.is_path_exist_in_template(template_name, path)
+```
+
+* Show measurement nodes in a schema template
+```python
+session.show_measurements_in_template(template_name)
+```
+
+* Show the path prefix where a schema template is set
+```python
+session.show_paths_template_set_on(template_name)
+```
+
+* Show the path prefix where a schema template is used (i.e. the time series has been created)
+```python
+session.show_paths_template_using_on(template_name)
+```
+
+#### Drop Schema Template
+Delete an existing schema template; dropping a template that is already set on a path is not supported
+```python
+session.drop_schema_template("template_python")
+```
+
### Pandas Support
@@ -322,6 +414,150 @@ class MyTestCase(unittest.TestCase):
by default it will load the image `apache/iotdb:latest`, if you want a specific version just pass it like e.g. `IoTDBContainer("apache/iotdb:0.12.0")` to get version `0.12.0` running.
+### IoTDB DBAPI
+
+IoTDB DBAPI implements the Python DB API 2.0 specification (https://peps.python.org/pep-0249/), which defines a common
+interface for accessing databases in Python.
+
+#### Examples
++ Initialization
+
+The initialization parameters are consistent with the session part (except for sqlalchemy_mode).
+```python
+from iotdb.dbapi import connect
+
+ip = "127.0.0.1"
+port_ = "6667"
+username_ = "root"
+password_ = "root"
+conn = connect(ip, port_, username_, password_,fetch_size=1024,zone_id="UTC+8",sqlalchemy_mode=False)
+cursor = conn.cursor()
+```
++ simple SQL statement execution
+```python
+cursor.execute("SELECT * FROM root.*")
+for row in cursor.fetchall():
+ print(row)
+```
+
++ execute SQL with parameter
+
+IoTDB DBAPI supports pyformat style parameters
+```python
+cursor.execute("SELECT * FROM root.* WHERE time < %(time)s",{"time":"2017-11-01T00:08:00.000"})
+for row in cursor.fetchall():
+ print(row)
+```
+
++ execute SQL with parameter sequences
+```python
+seq_of_parameters = [
+ {"timestamp": 1, "temperature": 1},
+ {"timestamp": 2, "temperature": 2},
+ {"timestamp": 3, "temperature": 3},
+ {"timestamp": 4, "temperature": 4},
+ {"timestamp": 5, "temperature": 5},
+]
+sql = "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(temperature)s)"
+cursor.executemany(sql,seq_of_parameters)
+```
+
++ close the connection and cursor
+```python
+cursor.close()
+conn.close()
+```
+
+### IoTDB SQLAlchemy Dialect (Experimental)
+The SQLAlchemy dialect of IoTDB is written to adapt to Apache Superset.
+This part is still being improved.
+Please do not use it in the production environment!
+#### Mapping of the metadata
+The data model used by SQLAlchemy is a relational data model, which describes the relationships between different entities through tables.
+While the data model of IoTDB is a hierarchical data model, which organizes the data through a tree structure.
+In order to adapt IoTDB to the dialect of SQLAlchemy, the original data model in IoTDB needs to be reorganized,
+converting the data model of IoTDB into the data model of SQLAlchemy.
+
+The metadata in the IoTDB are:
+
+1. Storage Group
+2. Path
+3. Entity
+4. Measurement
+
+The metadata in the SQLAlchemy are:
+1. Schema
+2. Table
+3. Column
+
+The mapping relationship between them is:
+
+| The metadata in the SQLAlchemy | The metadata in the IoTDB |
+| -------------------- | ---------------------------------------------- |
+| Schema | Storage Group |
+| Table | Path ( from storage group to entity ) + Entity |
+| Column | Measurement |
+
+The following figure shows the relationship between the two more intuitively:
+
+![sqlalchemy-to-iotdb](https://github.com/apache/iotdb-bin-resources/blob/main/docs/UserGuide/API/IoTDB-SQLAlchemy/sqlalchemy-to-iotdb.png?raw=true)
+
+#### Data type mapping
+| data type in IoTDB | data type in SQLAlchemy |
+|--------------------|-------------------------|
+| BOOLEAN | Boolean |
+| INT32 | Integer |
+| INT64 | BigInteger |
+| FLOAT | Float |
+| DOUBLE | Float |
+| TEXT | Text |
+| LONG | BigInteger |
+#### Example
+
++ execute statement
+
+```python
+from sqlalchemy import create_engine
+
+engine = create_engine("iotdb://root:root@127.0.0.1:6667")
+connect = engine.connect()
+result = connect.execute("SELECT ** FROM root")
+for row in result.fetchall():
+ print(row)
+```
+
++ ORM (now only simple queries are supported)
+
+```python
+from sqlalchemy import create_engine, Column, Float, BigInteger, MetaData
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+
+metadata = MetaData(
+ schema='root.factory'
+)
+Base = declarative_base(metadata=metadata)
+
+
+class Device(Base):
+ __tablename__ = "room2.device1"
+ Time = Column(BigInteger, primary_key=True)
+ temperature = Column(Float)
+ status = Column(Float)
+
+
+engine = create_engine("iotdb://root:root@127.0.0.1:6667")
+
+DbSession = sessionmaker(bind=engine)
+session = DbSession()
+
+res = session.query(Device.status).filter(Device.temperature > 1)
+
+for row in res:
+ print(row)
+```
+
+
## Developers
### Introduction
diff --git a/client-py/SessionExample.py b/client-py/SessionExample.py
index 93aa839c3bd4..61e82234dbf9 100644
--- a/client-py/SessionExample.py
+++ b/client-py/SessionExample.py
@@ -20,6 +20,9 @@
import numpy as np
from iotdb.Session import Session
+from iotdb.template.InternalNode import InternalNode
+from iotdb.template.MeasurementNode import MeasurementNode
+from iotdb.template.Template import Template
from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
from iotdb.utils.Tablet import Tablet
from iotdb.utils.NumpyTablet import NumpyTablet
@@ -280,6 +283,17 @@
while session_data_set.has_next():
print(session_data_set.next())
+# execute statement
+with session.execute_statement(
+ "select * from root.sg_test_01.d_01"
+) as session_data_set:
+ while session_data_set.has_next():
+ print(session_data_set.next())
+
+session.execute_statement(
+ "insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)"
+)
+
# insert string records of one device
time_list = [1, 2, 3]
measurements_list = [
@@ -313,6 +327,90 @@
# delete storage group
session.delete_storage_group("root.sg_test_01")
+# create measurement node template
+template = Template(name="template_python", share_time=False)
+m_node_1 = MeasurementNode(
+ name="s1",
+ data_type=TSDataType.INT64,
+ encoding=TSEncoding.RLE,
+ compression_type=Compressor.SNAPPY,
+)
+m_node_2 = MeasurementNode(
+ name="s2",
+ data_type=TSDataType.INT64,
+ encoding=TSEncoding.RLE,
+ compression_type=Compressor.SNAPPY,
+)
+m_node_3 = MeasurementNode(
+ name="s3",
+ data_type=TSDataType.INT64,
+ encoding=TSEncoding.RLE,
+ compression_type=Compressor.SNAPPY,
+)
+template.add_template(m_node_1)
+template.add_template(m_node_2)
+template.add_template(m_node_3)
+session.create_schema_template(template)
+print("create template success template_python")
+
+# create internal node template
+template_name = "treeTemplate_python"
+template = Template(name=template_name, share_time=True)
+i_node_gps = InternalNode(name="GPS", share_time=False)
+i_node_v = InternalNode(name="vehicle", share_time=True)
+m_node_x = MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY)
+
+i_node_gps.add_child(m_node_x)
+i_node_v.add_child(m_node_x)
+template.add_template(i_node_gps)
+template.add_template(i_node_v)
+template.add_template(m_node_x)
+
+session.create_schema_template(template)
+print("create template success treeTemplate_python}")
+
+print(session.is_measurement_in_template(template_name, "GPS"))
+print(session.is_measurement_in_template(template_name, "GPS.x"))
+print(session.show_all_templates())
+
+# # append schema template
+data_types = [TSDataType.FLOAT, TSDataType.FLOAT, TSDataType.DOUBLE]
+encoding_list = [TSEncoding.RLE, TSEncoding.RLE, TSEncoding.GORILLA]
+compressor_list = [Compressor.SNAPPY, Compressor.SNAPPY, Compressor.LZ4]
+
+measurements_aligned_path = ["aligned.s1", "aligned.s2", "aligned.s3"]
+session.add_measurements_in_template(
+ template_name,
+ measurements_aligned_path,
+ data_types,
+ encoding_list,
+ compressor_list,
+ is_aligned=True,
+)
+# session.drop_schema_template("add_template_python")
+measurements_aligned_path = ["unaligned.s1", "unaligned.s2", "unaligned.s3"]
+session.add_measurements_in_template(
+ template_name,
+ measurements_aligned_path,
+ data_types,
+ encoding_list,
+ compressor_list,
+ is_aligned=False,
+)
+session.delete_node_in_template(template_name, "aligned.s1")
+print(session.count_measurements_in_template(template_name))
+print(session.is_path_exist_in_template(template_name, "aligned.s1"))
+print(session.is_path_exist_in_template(template_name, "aligned.s2"))
+
+session.set_schema_template(template_name, "root.python.set")
+print(session.show_paths_template_using_on(template_name))
+print(session.show_paths_template_set_on(template_name))
+session.unset_schema_template(template_name, "root.python.set")
+
+# drop template
+session.drop_schema_template("template_python")
+session.drop_schema_template(template_name)
+print("drop template success, template_python and treeTemplate_python")
# close session connection.
session.close()
diff --git a/client-py/iotdb/Session.py b/client-py/iotdb/Session.py
index 780ecc7d7c35..c5a83d3e053f 100644
--- a/client-py/iotdb/Session.py
+++ b/client-py/iotdb/Session.py
@@ -18,12 +18,12 @@
import logging
import struct
import time
-
-from iotdb.utils.SessionDataSet import SessionDataSet
-
from thrift.protocol import TBinaryProtocol, TCompactProtocol
from thrift.transport import TSocket, TTransport
+from iotdb.utils.SessionDataSet import SessionDataSet
+from .template.Template import Template
+from .template.TemplateQueryType import TemplateQueryType
from .thrift.rpc.TSIService import (
Client,
TSCreateTimeseriesReq,
@@ -38,6 +38,13 @@
TSInsertTabletsReq,
TSInsertRecordsReq,
TSInsertRecordsOfOneDeviceReq,
+ TSCreateSchemaTemplateReq,
+ TSDropSchemaTemplateReq,
+ TSAppendSchemaTemplateReq,
+ TSPruneSchemaTemplateReq,
+ TSSetSchemaTemplateReq,
+ TSUnsetSchemaTemplateReq,
+ TSQueryTemplateReq,
)
from .thrift.rpc.ttypes import (
TSDeleteDataReq,
@@ -47,7 +54,6 @@
TSLastDataQueryReq,
TSInsertStringRecordsOfOneDeviceReq,
)
-
# for debug
# from IoTDBConstants import *
# from SessionDataSet import SessionDataSet
@@ -1027,12 +1033,19 @@ def verify_success(status):
if status.code == Session.SUCCESS_CODE:
return 0
- logger.error("error status is", status)
+ logger.error("error status is %s", status)
return -1
def execute_raw_data_query(
self, paths: list, start_time: int, end_time: int
) -> SessionDataSet:
+ """
+ execute query statement and returns SessionDataSet
+ :param paths: String path list
+ :param start_time: Query start time
+ :param end_time: Query end time
+ :return: SessionDataSet, contains query results and relevant info (see SessionDataSet.py)
+ """
request = TSRawDataQueryReq(
self.__session_id,
paths,
@@ -1057,6 +1070,12 @@ def execute_raw_data_query(
)
def execute_last_data_query(self, paths: list, last_time: int) -> SessionDataSet:
+ """
+ execute query statement and returns SessionDataSet
+ :param paths: String path list
+ :param last_time: Query last time
+ :return: SessionDataSet, contains query results and relevant info (see SessionDataSet.py)
+ """
request = TSLastDataQueryReq(
self.__session_id,
paths,
@@ -1088,6 +1107,16 @@ def insert_string_records_of_one_device(
values_list: list,
have_sorted: bool = False,
):
+ """
+ insert multiple row of string record into database:
+ timestamp, m1, m2, m3
+ 0, text1, text2, text3
+ :param device_id: String, device id
+ :param times: Timestamp list
+ :param measurements_list: Measurements list
+ :param values_list: Value list
+ :param have_sorted: have these list been sorted by timestamp
+ """
if (len(times) != len(measurements_list)) or (len(times) != len(values_list)):
raise RuntimeError(
"insert records of one device error: times, measurementsList and valuesList's size should be equal!"
@@ -1151,3 +1180,274 @@ def gen_insert_string_records_of_one_device_request(
is_aligned,
)
return request
+
+ def create_schema_template(self, template: Template):
+ """
+ create schema template, users using this method should use the template class as an argument
+ :param template: The template contains multiple child node(see Template.py)
+ """
+ bytes_array = template.serialize
+ request = TSCreateSchemaTemplateReq(
+ self.__session_id, template.get_name(), bytes_array
+ )
+ status = self.__client.createSchemaTemplate(request)
+ logger.debug(
+ "create one template {} template name: {}".format(
+ self.__session_id, template.get_name()
+ )
+ )
+ return Session.verify_success(status)
+
+ def drop_schema_template(self, template_name: str):
+ """
+ drop schema template, this method should be used to the template unset anything
+ :param template_name: template name
+ """
+ request = TSDropSchemaTemplateReq(self.__session_id, template_name)
+ status = self.__client.dropSchemaTemplate(request)
+ logger.debug(
+ "drop one template {} template name: {}".format(
+ self.__session_id, template_name
+ )
+ )
+ return Session.verify_success(status)
+
+ def execute_statement(self, sql: str, timeout=0):
+ request = TSExecuteStatementReq(
+ self.__session_id, sql, self.__statement_id, self.__fetch_size, timeout
+ )
+ try:
+ resp = self.__client.executeStatement(request)
+ status = resp.status
+ logger.debug("execute statement {} message: {}".format(sql, status.message))
+ if Session.verify_success(status) == 0:
+ if resp.columns:
+ return SessionDataSet(
+ sql,
+ resp.columns,
+ resp.dataTypeList,
+ resp.columnNameIndexMap,
+ resp.queryId,
+ self.__client,
+ self.__statement_id,
+ self.__session_id,
+ resp.queryDataSet,
+ resp.ignoreTimeStamp,
+ )
+ else:
+ return None
+ else:
+ raise RuntimeError(
+ "execution of statement fails because: {}", status.message
+ )
+ except TTransport.TException as e:
+ raise RuntimeError("execution of statement fails because: ", e)
+
+ def add_measurements_in_template(
+ self,
+ template_name: str,
+ measurements_path: list,
+ data_types: list,
+ encodings: list,
+ compressors: list,
+ is_aligned: bool = False,
+ ):
+ """
+ add measurements in the template, the template must already create. This function adds some measurements node.
+ :param template_name: template name, string list, like ["name_x", "name_y", "name_z"]
+ :param measurements_path: when ths is_aligned is True, recommend the name like a.b, like [python.x, python.y, iotdb.z]
+ :param data_types: using TSDataType(see IoTDBConstants.py)
+ :param encodings: using TSEncoding(see IoTDBConstants.py)
+ :param compressors: using Compressor(see IoTDBConstants.py)
+ :param is_aligned: True is aligned, False is unaligned
+ """
+ request = TSAppendSchemaTemplateReq(
+ self.__session_id,
+ template_name,
+ is_aligned,
+ measurements_path,
+ list(map(lambda x: x.value, data_types)),
+ list(map(lambda x: x.value, encodings)),
+ list(map(lambda x: x.value, compressors)),
+ )
+ status = self.__client.appendSchemaTemplate(request)
+ logger.debug(
+ "append unaligned template {} template name: {}".format(
+ self.__session_id, template_name
+ )
+ )
+ return Session.verify_success(status)
+
+ def delete_node_in_template(self, template_name: str, path: str):
+ """
+ delete a node in the template, this node must be already in the template
+ :param template_name: template name
+ :param path: measurements path
+ """
+ request = TSPruneSchemaTemplateReq(self.__session_id, template_name, path)
+ status = self.__client.pruneSchemaTemplate(request)
+ logger.debug(
+ "append unaligned template {} template name: {}".format(
+ self.__session_id, template_name
+ )
+ )
+ return Session.verify_success(status)
+
+ def set_schema_template(self, template_name, prefix_path):
+ """
+ set template in prefix path, template already exit, prefix path is not measurements
+ :param template_name: template name
+ :param prefix_path: prefix path
+ """
+ request = TSSetSchemaTemplateReq(self.__session_id, template_name, prefix_path)
+ status = self.__client.setSchemaTemplate(request)
+ logger.debug(
+ "set schema template to path{} template name: {}, path:{}".format(
+ self.__session_id, template_name, prefix_path
+ )
+ )
+ return Session.verify_success(status)
+
+ def unset_schema_template(self, template_name, prefix_path):
+ """
+ unset schema template from prefix path, this method unsetting the template from entities,
+ which have already inserted records using the template, is not supported.
+ :param template_name: template name
+ :param prefix_path:
+ """
+ request = TSUnsetSchemaTemplateReq(
+ self.__session_id, prefix_path, template_name
+ )
+ status = self.__client.unsetSchemaTemplate(request)
+ logger.debug(
+ "set schema template to path{} template name: {}, path:{}".format(
+ self.__session_id, template_name, prefix_path
+ )
+ )
+ return Session.verify_success(status)
+
+ def count_measurements_in_template(self, template_name: str):
+ """
+ drop schema template, this method should be used to the template unset anything
+ :param template_name: template name
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id,
+ template_name,
+ TemplateQueryType.COUNT_MEASUREMENTS.value,
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "count measurements template {}, template name is {}, count is {}".format(
+ self.__session_id, template_name, response.measurements
+ )
+ )
+ return response.count
+
+ def is_measurement_in_template(self, template_name: str, path: str):
+ """
+ judge the node in the template is measurement or not, this node must in the template
+ :param template_name: template name
+ :param path:
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id,
+ template_name,
+ TemplateQueryType.IS_MEASUREMENT.value,
+ path,
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "judge the path is measurement or not in template {}, template name is {}, result is {}".format(
+ self.__session_id, template_name, response.result
+ )
+ )
+ return response.result
+
+ def is_path_exist_in_template(self, template_name: str, path: str):
+ """
+ judge whether the node is a measurement or not in the template, this node must be in the template
+ :param template_name: template name
+ :param path:
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id, template_name, TemplateQueryType.PATH_EXIST.value, path
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "judge the path is in template or not {}, template name is {}, result is {}".format(
+ self.__session_id, template_name, response.result
+ )
+ )
+ return response.result
+
+ def show_measurements_in_template(self, template_name: str, pattern: str = ""):
+ """
+ show all measurements under the pattern in template
+ :param template_name: template name
+ :param pattern: parent path, if default, show all measurements
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id,
+ template_name,
+ TemplateQueryType.SHOW_MEASUREMENTS.value,
+ pattern,
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "show measurements in template {}, template name is {}, result is {}".format(
+ self.__session_id, template_name, response.measurements
+ )
+ )
+ return response.measurements
+
+ def show_all_templates(self):
+ """
+ show all schema templates
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id,
+ "",
+ TemplateQueryType.SHOW_TEMPLATES.value,
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "show all template {}, measurements is {}".format(
+ self.__session_id, response.measurements
+ )
+ )
+ return response.measurements
+
+ def show_paths_template_set_on(self, template_name):
+ """
+ show the path prefix where a schema template is set
+ :param template_name:
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id, template_name, TemplateQueryType.SHOW_SET_TEMPLATES.value
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "show paths template set {}, on {}".format(
+ self.__session_id, response.measurements
+ )
+ )
+ return response.measurements
+
+ def show_paths_template_using_on(self, template_name):
+ """
+ show the path prefix where a schema template is used
+ :param template_name:
+ """
+ request = TSQueryTemplateReq(
+ self.__session_id,
+ template_name,
+ TemplateQueryType.SHOW_USING_TEMPLATES.value,
+ )
+ response = self.__client.querySchemaTemplate(request)
+ logger.debug(
+ "show paths template using {}, on {}".format(
+ self.__session_id, response.measurements
+ )
+ )
+ return response.measurements
diff --git a/client-py/iotdb/dbapi/Connection.py b/client-py/iotdb/dbapi/Connection.py
new file mode 100644
index 000000000000..aee5520e9af9
--- /dev/null
+++ b/client-py/iotdb/dbapi/Connection.py
@@ -0,0 +1,91 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import logging
+
+from iotdb.Session import Session
+
+from .Cursor import Cursor
+from .Exceptions import ConnectionError, ProgrammingError
+
+logger = logging.getLogger("IoTDB")
+
+
+class Connection(object):
+ def __init__(
+ self,
+ host,
+ port,
+ username=Session.DEFAULT_USER,
+ password=Session.DEFAULT_PASSWORD,
+ fetch_size=Session.DEFAULT_FETCH_SIZE,
+ zone_id=Session.DEFAULT_ZONE_ID,
+ enable_rpc_compression=False,
+ sqlalchemy_mode=False,
+ ):
+ self.__session = Session(host, port, username, password, fetch_size, zone_id)
+ self.__sqlalchemy_mode = sqlalchemy_mode
+ self.__is_close = True
+ try:
+ self.__session.open(enable_rpc_compression)
+ self.__is_close = False
+ except Exception as e:
+ raise ConnectionError(e)
+
+ def close(self):
+ """
+ Close the connection now
+ """
+ if self.__is_close:
+ return
+ self.__session.close()
+ self.__is_close = True
+
+ def cursor(self):
+ """
+ Return a new Cursor Object using the connection.
+ """
+ if not self.__is_close:
+ return Cursor(self, self.__session, self.__sqlalchemy_mode)
+ else:
+ raise ProgrammingError("Connection closed")
+
+ def commit(self):
+ """
+ Not supported method.
+ """
+ pass
+
+ def rollback(self):
+ """
+ Not supported method.
+ """
+ pass
+
+ @property
+ def is_close(self):
+ """
+ This read-only attribute specified whether the object is closed
+ """
+ return self.__is_close
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
diff --git a/client-py/iotdb/dbapi/Cursor.py b/client-py/iotdb/dbapi/Cursor.py
new file mode 100644
index 000000000000..a1d6e2caabac
--- /dev/null
+++ b/client-py/iotdb/dbapi/Cursor.py
@@ -0,0 +1,288 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import logging
+import warnings
+
+from iotdb.Session import Session
+
+from .Exceptions import ProgrammingError
+
+logger = logging.getLogger("IoTDB")
+
+
+class Cursor(object):
+ def __init__(self, connection, session: Session, sqlalchemy_mode):
+ self.__connection = connection
+ self.__session = session
+ self.__sqlalchemy_mode = sqlalchemy_mode
+ self.__arraysize = 1
+ self.__is_close = False
+ self.__result = None
+ self.__rows = None
+ self.__rowcount = -1
+
+ @property
+ def description(self):
+ """
+ This read-only attribute is a sequence of 7-item sequences.
+ """
+ if self.__is_close or not self.__result["col_names"]:
+ return
+
+ description = []
+
+ col_names = self.__result["col_names"]
+ col_types = self.__result["col_types"]
+
+ for i in range(len(col_names)):
+ description.append(
+ (
+ col_names[i],
+ None if self.__sqlalchemy_mode is True else col_types[i].value,
+ None,
+ None,
+ None,
+ None,
+ col_names[i] == "Time",
+ )
+ )
+ return tuple(description)
+
+ @property
+ def arraysize(self):
+ """
+ This read/write attribute specifies the number of rows to fetch at a time with .fetchmany().
+ """
+ return self.__arraysize
+
+ @arraysize.setter
+ def arraysize(self, value):
+ """
+ Set the arraysize.
+ :param value: arraysize
+ """
+ try:
+ self.__arraysize = int(value)
+ except TypeError:
+ self.__arraysize = 1
+
+ @property
+ def rowcount(self):
+ """
+ This read-only attribute specifies the number of rows that the last
+ .execute*() produced (for DQL statements like ``SELECT``) or affected
+ (for DML statements like ``DELETE`` or ``INSERT`` return 0 if successful, -1 if unsuccessful).
+ """
+ if self.__is_close or self.__result is None or "row_count" not in self.__result:
+ return -1
+ return self.__result.get("row_count", -1)
+
+ def execute(self, operation, parameters=None):
+ """
+ Prepare and execute a database operation (query or command).
+ :param operation: a database operation
+ :param parameters: parameters of the operation
+ """
+ if self.__connection.is_close:
+ raise ProgrammingError("Connection closed!")
+
+ if self.__is_close:
+ raise ProgrammingError("Cursor closed!")
+
+ if parameters is None:
+ sql = operation
+ else:
+ sql = operation % parameters
+
+ time_index = []
+ time_names = []
+ if self.__sqlalchemy_mode:
+ sql_seqs = []
+ seqs = sql.split("\n")
+ for seq in seqs:
+ if seq.find("FROM Time Index") >= 0:
+ time_index = [
+ int(index)
+ for index in seq.replace("FROM Time Index", "").split()
+ ]
+ elif seq.find("FROM Time Name") >= 0:
+ time_names = [
+ name for name in seq.replace("FROM Time Name", "").split()
+ ]
+ else:
+ sql_seqs.append(seq)
+ sql = "\n".join(sql_seqs)
+
+ try:
+ data_set = self.__session.execute_statement(sql)
+ col_names = None
+ col_types = None
+ rows = []
+
+ if data_set:
+ data = data_set.todf()
+
+ if self.__sqlalchemy_mode and time_index:
+ time_column = data.columns[0]
+ time_column_value = data.Time
+ del data[time_column]
+ for i in range(len(time_index)):
+ data.insert(time_index[i], time_names[i], time_column_value)
+
+ col_names = data.columns.tolist()
+ col_types = data_set.get_column_types()
+ rows = data.values.tolist()
+ data_set.close_operation_handle()
+
+ self.__result = {
+ "col_names": col_names,
+ "col_types": col_types,
+ "rows": rows,
+ "row_count": len(rows),
+ }
+ except Exception:
+ logger.error("failed to execute statement:{}".format(sql))
+ self.__result = {
+ "col_names": None,
+ "col_types": None,
+ "rows": [],
+ "row_count": -1,
+ }
+ self.__rows = iter(self.__result["rows"])
+
+ def executemany(self, operation, seq_of_parameters=None):
+ """
+ Prepare a database operation (query or command) and then execute it
+ against all parameter sequences or mappings found in the sequence
+ ``seq_of_parameters``
+ :param operation: a database operation
+ :param seq_of_parameters: pyformat style parameter list of the operation
+ """
+ if self.__connection.is_close:
+ raise ProgrammingError("Connection closed!")
+
+ if self.__is_close:
+ raise ProgrammingError("Cursor closed!")
+
+ rows = []
+ if seq_of_parameters is None:
+ self.execute(operation)
+ rows.extend(self.__result["rows"])
+ else:
+ for parameters in seq_of_parameters:
+ self.execute(operation, parameters)
+ rows.extend(self.__result["rows"])
+
+ self.__result["rows"] = rows
+ self.__rows = iter(self.__result["rows"])
+
+ def fetchone(self):
+ """
+ Fetch the next row of a query result set, returning a single sequence,
+ or None when no more data is available.
+ Alias for ``next()``.
+ """
+ try:
+ return self.next()
+ except StopIteration:
+ return None
+
+ def fetchmany(self, count=None):
+ """
+ Fetch the next set of rows of a query result, returning a sequence of
+ sequences (e.g. a list of tuples). An empty sequence is returned when
+ no more rows are available.
+ """
+ if count is None:
+ count = self.__arraysize
+ if count == 0:
+ return self.fetchall()
+ result = []
+ for i in range(count):
+ try:
+ result.append(self.next())
+ except StopIteration:
+ pass
+ return result
+
+ def fetchall(self):
+ """
+ Fetch all (remaining) rows of a query result, returning them as a
+ sequence of sequences (e.g. a list of tuples). Note that the cursor's
+ arraysize attribute can affect the performance of this operation.
+ """
+ result = []
+ iterate = True
+ while iterate:
+ try:
+ result.append(self.next())
+ except StopIteration:
+ iterate = False
+ return result
+
+ def next(self):
+ """
+ Return the next row of a query result set, respecting if cursor was
+ closed.
+ """
+ if self.__result is None:
+ raise ProgrammingError(
+ "No result available. execute() or executemany() must be called first."
+ )
+ elif not self.__is_close:
+ return next(self.__rows)
+ else:
+ raise ProgrammingError("Cursor closed!")
+
+ __next__ = next
+
+ def close(self):
+ """
+ Close the cursor now.
+ """
+ self.__is_close = True
+ self.__result = None
+
+ def setinputsizes(self, sizes):
+ """
+ Not supported method.
+ """
+ pass
+
+ def setoutputsize(self, size, column=None):
+ """
+ Not supported method.
+ """
+ pass
+
+ def __iter__(self):
+ """
+ Support iterator interface:
+ http://legacy.python.org/dev/peps/pep-0249/#iter
+ This iterator is shared. Advancing this iterator will advance other
+ iterators created from this cursor.
+ """
+ warnings.warn("DB-API extension cursor.__iter__() used")
+ return self
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
diff --git a/client-py/iotdb/dbapi/Exceptions.py b/client-py/iotdb/dbapi/Exceptions.py
new file mode 100644
index 000000000000..d58689c86930
--- /dev/null
+++ b/client-py/iotdb/dbapi/Exceptions.py
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+class Error(Exception):
+ pass
+
+
+class Warning(Exception):
+ pass
+
+
+class DatabaseError(Error):
+ pass
+
+
+class DataError(DatabaseError):
+ pass
+
+
+class InterfaceError(Error):
+ pass
+
+
+class InternalError(DatabaseError):
+ pass
+
+
+class IntegrityError(DatabaseError):
+ pass
+
+
+class OperationalError(DatabaseError):
+ pass
+
+
+class ProgrammingError(DatabaseError):
+ pass
+
+
+class NotSupportedError(DatabaseError):
+ pass
+
+
+class ConnectionError(DatabaseError):
+ pass
diff --git a/client-py/iotdb/dbapi/__init__.py b/client-py/iotdb/dbapi/__init__.py
new file mode 100644
index 000000000000..9f8006175008
--- /dev/null
+++ b/client-py/iotdb/dbapi/__init__.py
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from .Connection import Connection as connect
+from .Exceptions import Error
+
+# PEP 249 / Python convention requires __all__ to contain *names* (strings),
+# not the objects themselves; listing the objects makes
+# `from iotdb.dbapi import *` raise TypeError in Python 3.
+__all__ = ["connect", "Error"]
+
+# PEP 249 module globals: API level, thread-safety level 2 (threads may
+# share the module and connections), and the paramstyle used by execute().
+apilevel = "2.0"
+threadsafety = 2
+paramstyle = "pyformat"
diff --git a/client-py/iotdb/dbapi/tests/__init__.py b/client-py/iotdb/dbapi/tests/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/dbapi/tests/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/dbapi/tests/test_connection.py b/client-py/iotdb/dbapi/tests/test_connection.py
new file mode 100644
index 000000000000..cb1f6c1e65b0
--- /dev/null
+++ b/client-py/iotdb/dbapi/tests/test_connection.py
@@ -0,0 +1,57 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from iotdb.IoTDBContainer import IoTDBContainer
+from iotdb.dbapi import connect
+
+final_flag = True
+failed_count = 0
+
+
+def test_fail():
+    # Record a failure in the module-level counters consulted by the
+    # summary block at the bottom of this file.
+    global failed_count
+    global final_flag
+    final_flag = False
+    failed_count += 1
+
+
+def print_message(message):
+    # Print a message framed by separator lines for visibility in test logs.
+    print("*********")
+    print(message)
+    print("*********")
+
+
+def test_connection():
+    # End-to-end check that a DB-API connection can be opened against a
+    # containerized IoTDB instance (default port 6667) and closed again.
+    with IoTDBContainer("iotdb:dev") as db:
+        db: IoTDBContainer
+        conn = connect(db.get_container_host_ip(), db.get_exposed_port(6667))
+        if conn.is_close:
+            print("can't create connect")
+            exit(1)
+        conn.close()
+        # After close(), the is_close flag must be set.
+        if not conn.is_close:
+            test_fail()
+            print_message("failed to close the connection!")
+
+
+# NOTE(review): this block runs at import time, before pytest executes
+# test_connection(), so it can only observe the initial flag values —
+# confirm whether it was meant to run after the tests.
+if final_flag:
+    print("All executions done!!")
+else:
+    print("Some test failed, please have a check")
+    print("failed count: ", failed_count)
+    exit(1)
diff --git a/client-py/iotdb/dbapi/tests/test_cursor.py b/client-py/iotdb/dbapi/tests/test_cursor.py
new file mode 100644
index 000000000000..6cd42257dc03
--- /dev/null
+++ b/client-py/iotdb/dbapi/tests/test_cursor.py
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from iotdb.IoTDBContainer import IoTDBContainer
+from iotdb.dbapi import connect
+from iotdb.dbapi.Cursor import Cursor
+
+final_flag = True
+failed_count = 0
+
+
+def test_fail():
+    # Record a failure in the module-level counters consulted by the
+    # summary block at the bottom of this file.
+    global failed_count
+    global final_flag
+    final_flag = False
+    failed_count += 1
+
+
+def print_message(message):
+    # Print a message framed by separator lines for visibility in test logs.
+    print("*********")
+    print(message)
+    print("*********")
+
+
+def test_cursor():
+    # Exercises the DB-API Cursor against a containerized IoTDB:
+    # execute, parameterized execute (pyformat), executemany, and the
+    # fetchall/fetchmany/fetchone retrieval methods.
+    with IoTDBContainer("iotdb:dev") as db:
+        db: IoTDBContainer
+        conn = connect(db.get_container_host_ip(), db.get_exposed_port(6667))
+        cursor: Cursor = conn.cursor()
+
+        # execute test
+        cursor.execute("create storage group root.cursor")
+        cursor.execute("create storage group root.cursor_s1")
+        cursor.execute("delete storage group root.cursor_s1")
+        if cursor.rowcount < 0:
+            test_fail()
+            print_message("execute test failed!")
+
+        # execute with args test — parameters bound via %(name)s pyformat
+        cursor.execute(
+            "create timeseries root.cursor.temperature with datatype=FLOAT,encoding=RLE"
+        )
+        cursor.execute(
+            "insert into root.cursor(timestamp,temperature) values(1,%(temperature)s)",
+            {"temperature": 0.3},
+        )
+        cursor.execute(
+            "insert into root.cursor(timestamp,temperature) values(2,%(temperature)s)",
+            {"temperature": 0.4},
+        )
+        cursor.execute("select * from root.cursor")
+        count = 2
+        actual_count = 0
+        for row in cursor.fetchall():
+            actual_count += 1
+        if count != actual_count:
+            test_fail()
+            print_message("execute with args test failed!")
+
+        # executemany with args test — 5 more rows on top of the 2 above
+        args = [
+            {"timestamp": 3, "temperature": 3},
+            {"timestamp": 4, "temperature": 4},
+            {"timestamp": 5, "temperature": 5},
+            {"timestamp": 6, "temperature": 6},
+            {"timestamp": 7, "temperature": 7},
+        ]
+        cursor.executemany(
+            "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(temperature)s)",
+            args,
+        )
+        cursor.execute("select * from root.cursor")
+        count = 7
+        actual_count = 0
+        for row in cursor.fetchall():
+            actual_count += 1
+        if count != actual_count:
+            test_fail()
+            print_message("executemany with args test failed!")
+
+        # fetchmany test — only `count` rows of the 7 should be returned
+        cursor.execute("select * from root.cursor")
+        count = 2
+        actual_count = 0
+        for row in cursor.fetchmany(count):
+            actual_count += 1
+        if count != actual_count:
+            test_fail()
+            print_message("fetchmany test failed!")
+
+        # fetchone test — first row's timestamp must be 1
+        cursor.execute("select * from root.cursor")
+        row = cursor.fetchone()
+        if row[0] != 1:
+            test_fail()
+            print_message("fetchone test failed")
+
+        # clean up the storage group and release resources
+        cursor.execute("delete storage group root.cursor")
+        cursor.close()
+        conn.close()
+
+
+# NOTE(review): this block runs at import time, before pytest executes
+# test_cursor(), so it can only observe the initial flag values —
+# confirm whether it was meant to run after the tests.
+if final_flag:
+    print("All executions done!!")
+else:
+    print("Some test failed, please have a check")
+    print("failed count: ", failed_count)
+    exit(1)
diff --git a/client-py/iotdb/sqlalchemy/IoTDBDialect.py b/client-py/iotdb/sqlalchemy/IoTDBDialect.py
new file mode 100644
index 000000000000..baf5d6525d4c
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/IoTDBDialect.py
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from sqlalchemy import types, util
+from sqlalchemy.engine import default
+from sqlalchemy.sql.sqltypes import String
+
+from iotdb import dbapi
+
+from .IoTDBSQLCompiler import IoTDBSQLCompiler
+from .IoTDBTypeCompiler import IoTDBTypeCompiler
+from .IoTDBIdentifierPreparer import IoTDBIdentifierPreparer
+
+# Mapping from IoTDB data-type names (as reported by the server) to
+# SQLAlchemy generic types.  "LONG" is the type used for the Time column.
+TYPES_MAP = {
+    "BOOLEAN": types.Boolean,
+    "INT32": types.Integer,
+    "INT64": types.BigInteger,
+    "FLOAT": types.Float,
+    "DOUBLE": types.Float,
+    "TEXT": types.Text,
+    "LONG": types.BigInteger,
+}
+
+
+class IoTDBDialect(default.DefaultDialect):
+    """SQLAlchemy dialect for Apache IoTDB.
+
+    Maps IoTDB concepts onto SQLAlchemy's relational model: storage
+    groups act as schemas and devices act as tables; timeseries under a
+    device are its columns, plus a synthetic non-null Time column.
+    """
+
+    name = "iotdb"
+    driver = "iotdb-python"
+    statement_compiler = IoTDBSQLCompiler
+    type_compiler = IoTDBTypeCompiler
+    preparer = IoTDBIdentifierPreparer
+    convert_unicode = True
+
+    supports_unicode_statements = True
+    supports_unicode_binds = True
+    supports_simple_order_by_label = False
+    supports_schemas = True
+    supports_right_nested_joins = False
+    description_encoding = None
+
+    # Compatibility shim across SQLAlchemy versions: newer versions expose
+    # String.RETURNS_UNICODE; older ones probe via _check_unicode_returns.
+    if hasattr(String, "RETURNS_UNICODE"):
+        returns_unicode_strings = String.RETURNS_UNICODE
+    else:
+
+        def _check_unicode_returns(self, connection, additional_tests=None):
+            return True
+
+        _check_unicode_returns = _check_unicode_returns
+
+    def create_connect_args(self, url):
+        # inherits the docstring from interfaces.Dialect.create_connect_args
+        opts = url.translate_connect_args()
+        opts.update(url.query)
+        # Tell the DB-API layer it is driven by SQLAlchemy.
+        opts.update({"sqlalchemy_mode": True})
+        return [[], opts]
+
+    @classmethod
+    def dbapi(cls):
+        # The PEP 249 module backing this dialect.
+        return dbapi
+
+    def has_schema(self, connection, schema):
+        return schema in self.get_schema_names(connection)
+
+    def has_table(self, connection, table_name, schema=None, **kw):
+        return table_name in self.get_table_names(connection, schema=schema)
+
+    def get_schema_names(self, connection, **kw):
+        # Storage groups play the role of schemas.
+        cursor = connection.execute("SHOW STORAGE GROUP")
+        return [row[0] for row in cursor.fetchall()]
+
+    def get_table_names(self, connection, schema=None, **kw):
+        # Resolve the schema once so the query and the prefix-stripping
+        # below always agree.  Previously a None schema (SQLAlchemy's
+        # default for inspector calls) fell back only in the query string
+        # and then raised TypeError on `schema + "."` in the replace.
+        schema = schema or self.default_schema_name
+        cursor = connection.execute("SHOW DEVICES %s.**" % schema)
+        return [row[0].replace(schema + ".", "", 1) for row in cursor.fetchall()]
+
+    def get_columns(self, connection, table_name, schema=None, **kw):
+        # Same None-schema fallback as get_table_names, for consistency.
+        schema = schema or self.default_schema_name
+        cursor = connection.execute("SHOW TIMESERIES %s.%s.*" % (schema, table_name))
+        columns = [self._general_time_column_info()]
+        for row in cursor.fetchall():
+            columns.append(self._create_column_info(row, schema, table_name))
+        return columns
+
+    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+        # IoTDB has no primary-key constraints.
+        pass
+
+    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+        # IoTDB has no foreign keys.
+        return []
+
+    def get_indexes(self, connection, table_name, schema=None, **kw):
+        # IoTDB exposes no index metadata.
+        return []
+
+    @util.memoized_property
+    def _dialect_specific_select_one(self):
+        # IoTDB does not support select 1
+        # so replace the statement with "show version"
+        return "SHOW VERSION"
+
+    def _general_time_column_info(self):
+        """
+        Treat Time as a column
+        """
+        return {
+            "name": "Time",
+            "type": self._resolve_type("LONG"),
+            "nullable": False,
+            "default": None,
+        }
+
+    def _create_column_info(self, row, schema, table_name):
+        """
+        Generate description information for each column
+        """
+        return {
+            "name": row[0].replace(schema + "." + table_name + ".", "", 1),
+            "type": self._resolve_type(row[3]),
+            "nullable": True,
+            "default": None,
+        }
+
+    def _resolve_type(self, type_):
+        # NOTE(review): the fallback is the UserDefinedType class itself,
+        # not an instance — confirm callers tolerate a type class here.
+        return TYPES_MAP.get(type_, types.UserDefinedType)
diff --git a/client-py/iotdb/sqlalchemy/IoTDBIdentifierPreparer.py b/client-py/iotdb/sqlalchemy/IoTDBIdentifierPreparer.py
new file mode 100644
index 000000000000..e09dd3c2305c
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/IoTDBIdentifierPreparer.py
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from sqlalchemy.sql.compiler import IdentifierPreparer
+
+
+class IoTDBIdentifierPreparer(IdentifierPreparer):
+    """Quote identifiers with backticks, the quoting character IoTDB uses."""
+
+    def __init__(self, dialect, **kw):
+        # Both the initial quote and its escape form are the backtick.
+        quote = "`"
+        super(IoTDBIdentifierPreparer, self).__init__(
+            dialect, initial_quote=quote, escape_quote=quote, **kw
+        )
diff --git a/client-py/iotdb/sqlalchemy/IoTDBSQLCompiler.py b/client-py/iotdb/sqlalchemy/IoTDBSQLCompiler.py
new file mode 100644
index 000000000000..36c4ca0bf764
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/IoTDBSQLCompiler.py
@@ -0,0 +1,243 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from sqlalchemy.sql.compiler import SQLCompiler
+from sqlalchemy.sql.compiler import OPERATORS
+from sqlalchemy.sql import operators
+
+
+class IoTDBSQLCompiler(SQLCompiler):
+    """SQL compiler adapting SQLAlchemy SELECT rendering to IoTDB syntax."""
+
+    def order_by_clause(self, select, **kw):
+        """allow dialects to customize how ORDER BY is rendered."""
+
+        order_by = select._order_by_clause._compiler_dispatch(self, **kw)
+        # Only an ORDER BY on Time is emitted; the double quotes that the
+        # base compiler wraps identifiers in are stripped out.
+        if "Time" in order_by:
+            return " ORDER BY " + order_by.replace('"', "")
+        else:
+            return ""
+
+    def group_by_clause(self, select, **kw):
+        """allow dialects to customize how GROUP BY is rendered."""
+        # GROUP BY is suppressed entirely for IoTDB.
+        return ""
+
+    def visit_select(
+        self,
+        select,
+        asfrom=False,
+        parens=True,
+        fromhints=None,
+        compound_index=0,
+        nested_join_translation=False,
+        select_wraps_for=None,
+        lateral=False,
+        **kwargs,
+    ):
+        """
+        Override this method to solve two problems
+        1. IoTDB does not support querying Time as a measurement name (e.g. select Time from root.storagegroup.device)
+        2. IoTDB does not support path.measurement format to determine a column (e.g. select root.storagegroup.device.temperature from root.storagegroup.device)
+        """
+        needs_nested_translation = (
+            select.use_labels
+            and not nested_join_translation
+            and not self.stack
+            and not self.dialect.supports_right_nested_joins
+        )
+
+        if needs_nested_translation:
+            transformed_select = self._transform_select_for_nested_joins(select)
+            text = self.visit_select(
+                transformed_select,
+                asfrom=asfrom,
+                parens=parens,
+                fromhints=fromhints,
+                compound_index=compound_index,
+                nested_join_translation=True,
+                **kwargs,
+            )
+
+        toplevel = not self.stack
+        entry = self._default_stack_entry if toplevel else self.stack[-1]
+
+        populate_result_map = need_column_expressions = (
+            toplevel
+            or entry.get("need_result_map_for_compound", False)
+            or entry.get("need_result_map_for_nested", False)
+        )
+
+        if compound_index > 0:
+            populate_result_map = False
+
+        # this was first proposed as part of #3372; however, it is not
+        # reached in current tests and could possibly be an assertion
+        # instead.
+        if not populate_result_map and "add_to_result_map" in kwargs:
+            del kwargs["add_to_result_map"]
+
+        if needs_nested_translation:
+            if populate_result_map:
+                self._transform_result_map_for_nested_joins(select, transformed_select)
+            return text
+
+        froms = self._setup_select_stack(select, entry, asfrom, lateral)
+
+        column_clause_args = kwargs.copy()
+        column_clause_args.update(
+            {"within_label_clause": False, "within_columns_clause": False}
+        )
+
+        text = "SELECT "  # we're off to a good start !
+
+        if select._hints:
+            hint_text, byfrom = self._setup_select_hints(select)
+            if hint_text:
+                text += hint_text + " "
+        else:
+            byfrom = None
+
+        if select._prefixes:
+            text += self._generate_prefixes(select, select._prefixes, **kwargs)
+
+        text += self.get_select_precolumns(select, **kwargs)
+        # the actual list of columns to print in the SELECT column list.
+        # IoTDB does not support querying Time as a measurement name (e.g. select Time from root.storagegroup.device)
+        columns = []
+        for name, column in select._columns_plus_names:
+            # NOTE(review): clearing .table makes the column render as a
+            # bare measurement name (no path prefix) — confirm mutating
+            # the statement's column objects has no reuse side effects.
+            column.table = None
+            columns.append(
+                self._label_select_column(
+                    select,
+                    column,
+                    populate_result_map,
+                    asfrom,
+                    column_clause_args,
+                    name=name,
+                    need_column_expressions=need_column_expressions,
+                )
+            )
+        inner_columns = [c for c in columns if c is not None]
+
+        if populate_result_map and select_wraps_for is not None:
+            # if this select is a compiler-generated wrapper,
+            # rewrite the targeted columns in the result map
+
+            translate = dict(
+                zip(
+                    [name for (key, name) in select._columns_plus_names],
+                    [name for (key, name) in select_wraps_for._columns_plus_names],
+                )
+            )
+
+            self._result_columns = [
+                (key, name, tuple(translate.get(o, o) for o in obj), type_)
+                for key, name, obj, type_ in self._result_columns
+            ]
+        # IoTDB does not allow to query Time as column,
+        # need to filter out Time and pass Time and Time's alias to DBAPI separately
+        # to achieve the query of Time by encoding.
+        time_column_index = []
+        time_column_names = []
+        for i in range(len(inner_columns)):
+            column_strs = (
+                inner_columns[i].replace(self.preparer.initial_quote, "").split()
+            )
+            if "Time" in column_strs:
+                time_column_index.append(str(i))
+                # NOTE(review): OPERATORS[operators.as_] is " AS " with
+                # surrounding spaces, while split() tokens carry none —
+                # confirm the alias branch is reachable.
+                time_column_names.append(
+                    column_strs[2]
+                    if OPERATORS[operators.as_] in column_strs
+                    else column_strs[0]
+                )
+        # delete Time column
+        inner_columns = list(
+            filter(
+                lambda x: "Time"
+                not in x.replace(self.preparer.initial_quote, "").split(),
+                inner_columns,
+            )
+        )
+        # Smuggle the Time positions/aliases to the DB-API layer by
+        # appending marker text after the last remaining column.
+        if inner_columns and time_column_index:
+            inner_columns[-1] = (
+                inner_columns[-1]
+                + " \n FROM Time Index "
+                + " ".join(time_column_index)
+                + "\n FROM Time Name "
+                + " ".join(time_column_names)
+            )
+
+        text = self._compose_select_body(
+            text, select, inner_columns, froms, byfrom, kwargs
+        )
+
+        if select._statement_hints:
+            per_dialect = [
+                ht
+                for (dialect_name, ht) in select._statement_hints
+                if dialect_name in ("*", self.dialect.name)
+            ]
+            if per_dialect:
+                text += " " + self.get_statement_hint_text(per_dialect)
+
+        if self.ctes and toplevel:
+            text = self._render_cte_clause() + text
+
+        if select._suffixes:
+            text += " " + self._generate_prefixes(select, select._suffixes, **kwargs)
+
+        self.stack.pop(-1)
+
+        if (asfrom or lateral) and parens:
+            return "(" + text + ")"
+        else:
+            return text
+
+    def visit_table(
+        self,
+        table,
+        asfrom=False,
+        iscrud=False,
+        ashint=False,
+        fromhints=None,
+        use_schema=True,
+        **kwargs,
+    ):
+        """
+        IoTDB's table does not support quotation marks (e.g. select ** from `root.`)
+        need to override this method
+        """
+        if asfrom or ashint:
+            effective_schema = self.preparer.schema_for_object(table)
+
+            # Render schema.table (or just the table name) unquoted.
+            if use_schema and effective_schema:
+                ret = effective_schema + "." + table.name
+            else:
+                ret = table.name
+            if fromhints and table in fromhints:
+                ret = self.format_from_hint_text(ret, table, fromhints[table], iscrud)
+            return ret
+        else:
+            return ""
+
+    def visit_column(
+        self, column, add_to_result_map=None, include_table=True, **kwargs
+    ):
+        """
+        IoTDB's where statement does not support "table".column format(e.g. "table".column > 1)
+        need to override this method to return the name of column directly
+        """
+        return column.name
diff --git a/client-py/iotdb/sqlalchemy/IoTDBTypeCompiler.py b/client-py/iotdb/sqlalchemy/IoTDBTypeCompiler.py
new file mode 100644
index 000000000000..4cfd2480bd4b
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/IoTDBTypeCompiler.py
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from sqlalchemy.sql.compiler import GenericTypeCompiler
+
+
+class IoTDBTypeCompiler(GenericTypeCompiler):
+    """Render SQLAlchemy generic types as IoTDB data-type names."""
+
+    def visit_FLOAT(self, type_, **kw):
+        return "FLOAT"
+
+    def visit_NUMERIC(self, type_, **kw):
+        # NOTE(review): NUMERIC maps to INT64 while DECIMAL maps to DOUBLE;
+        # confirm this asymmetry is intended.
+        return "INT64"
+
+    def visit_DECIMAL(self, type_, **kw):
+        return "DOUBLE"
+
+    def visit_INTEGER(self, type_, **kw):
+        return "INT32"
+
+    def visit_SMALLINT(self, type_, **kw):
+        # No 16-bit type is emitted; small integers widen to INT32.
+        return "INT32"
+
+    def visit_BIGINT(self, type_, **kw):
+        # NOTE(review): IoTDB DDL documents INT64 for 64-bit integers —
+        # confirm "LONG" is accepted by the server in CREATE statements.
+        return "LONG"
+
+    def visit_TIMESTAMP(self, type_, **kw):
+        return "LONG"
+
+    def visit_text(self, type_, **kw):
+        return "TEXT"
diff --git a/client-py/iotdb/sqlalchemy/__init__.py b/client-py/iotdb/sqlalchemy/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/sqlalchemy/tests/__init__.py b/client-py/iotdb/sqlalchemy/tests/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/tests/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/sqlalchemy/tests/test_dialect.py b/client-py/iotdb/sqlalchemy/tests/test_dialect.py
new file mode 100644
index 000000000000..7b0e3e2ad659
--- /dev/null
+++ b/client-py/iotdb/sqlalchemy/tests/test_dialect.py
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import operator
+
+from sqlalchemy import create_engine, inspect
+from sqlalchemy.dialects import registry
+
+from iotdb.IoTDBContainer import IoTDBContainer
+
+final_flag = True
+failed_count = 0
+
+
+def test_fail():
+    # Record a failure in the module-level counters consulted by the
+    # summary block at the bottom of this file.
+    global failed_count
+    global final_flag
+    final_flag = False
+    failed_count += 1
+
+
+def print_message(message):
+    # Print a message framed by separator lines for visibility in test logs.
+    print("*********")
+    print(message)
+    print("*********")
+
+
+def test_dialect():
+    # End-to-end check of the SQLAlchemy dialect: URL-based registration,
+    # engine creation, and inspector metadata (schemas, tables, columns).
+    with IoTDBContainer("iotdb:dev") as db:
+        db: IoTDBContainer
+        url = (
+            "iotdb://root:root@"
+            + db.get_container_host_ip()
+            + ":"
+            + db.get_exposed_port(6667)
+        )
+        registry.register("iotdb", "iotdb.sqlalchemy.IoTDBDialect", "IoTDBDialect")
+        eng = create_engine(url)
+        eng.execute("create storage group root.cursor")
+        eng.execute("create storage group root.cursor_s1")
+        eng.execute(
+            "create timeseries root.cursor.device1.temperature with datatype=FLOAT,encoding=RLE"
+        )
+        eng.execute(
+            "create timeseries root.cursor.device1.status with datatype=FLOAT,encoding=RLE"
+        )
+        eng.execute(
+            "create timeseries root.cursor.device2.temperature with datatype=FLOAT,encoding=RLE"
+        )
+        insp = inspect(eng)
+        # test get_schema_names
+        schema_names = insp.get_schema_names()
+        if not operator.eq(schema_names, ["root.cursor", "root.cursor_s1"]):
+            test_fail()
+            print_message("test get_schema_names failed!")
+        # test get_table_names
+        table_names = insp.get_table_names("root.cursor")
+        if not operator.eq(table_names, ["device1", "device2"]):
+            test_fail()
+            print_message("test get_table_names failed!")
+        # test get_columns — 2 timeseries plus the synthetic Time column
+        columns = insp.get_columns(table_name="device1", schema="root.cursor")
+        if len(columns) != 3:
+            test_fail()
+            print_message("test get_columns failed!")
+        eng.execute("delete storage group root.cursor")
+        eng.execute("delete storage group root.cursor_s1")
+        # close engine
+        eng.dispose()
+
+
+# NOTE(review): this block runs at import time, before pytest executes
+# test_dialect(), so it can only observe the initial flag values —
+# confirm whether it was meant to run after the tests.
+if final_flag:
+    print("All executions done!!")
+else:
+    print("Some test failed, please have a check")
+    print("failed count: ", failed_count)
+    exit(1)
diff --git a/client-py/iotdb/template/InternalNode.py b/client-py/iotdb/template/InternalNode.py
new file mode 100644
index 000000000000..bac17ca90ab1
--- /dev/null
+++ b/client-py/iotdb/template/InternalNode.py
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from .TemplateNode import TemplateNode
+
+
+class InternalNode(TemplateNode):
+    """Non-leaf node of a schema template; holds child nodes keyed by name."""
+
+    def __init__(self, name, share_time):
+        super().__init__(name)
+        self.children = {}
+        self.share_time = share_time
+
+    def add_child(self, node: TemplateNode):
+        # The original `assert "Duplicated child of node in template."` was
+        # a bare truthy-string assert and could never fire; raise the same
+        # error Template.add_template raises for duplicated children.
+        if node.get_name() in self.children:
+            raise Exception("Duplicated child of node in template.")
+
+        self.children.update({node.get_name(): node})
+
+    def delete_child(self, node):
+        # Removing an absent child is a deliberate no-op.
+        self.children.pop(node.get_name(), None)
+
+    def get_children(self):
+        return self.children
+
+    def is_share_time(self):
+        return self.share_time
diff --git a/client-py/iotdb/template/MeasurementNode.py b/client-py/iotdb/template/MeasurementNode.py
new file mode 100644
index 000000000000..7d96d4bc0d3a
--- /dev/null
+++ b/client-py/iotdb/template/MeasurementNode.py
@@ -0,0 +1,56 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from .TemplateNode import TemplateNode
+from ..tsfile.utils.ReadWriteIOUtils import ReadWriteUtils
+
+
+class MeasurementNode(TemplateNode):
+    """Leaf node of a schema template describing a single measurement."""
+
+    def __init__(
+        self,
+        name: str,
+        data_type: TSDataType,
+        encoding: TSEncoding,
+        compression_type: Compressor,
+    ):
+        # NOTE(review): TemplateNode.__init__ is not called; self.name is
+        # assigned directly — confirm get_name() relies only on self.name.
+        self.name = name
+        self.data_type = data_type
+        self.encoding = encoding
+        self.compression_type = compression_type
+
+    def is_measurement(self):
+        # Leaf nodes are measurements.
+        return True
+
+    def get_data_type(self):
+        return self.data_type
+
+    def get_encoding(self):
+        return self.encoding
+
+    def get_compression_type(self):
+        return self.compression_type
+
+    def serialize(self, *args, **kwargs):
+        # Expects (format_str_list, values_tobe_packed) as positional args;
+        # appends this node's name, data type, encoding and compressor.
+        format_str_list, values_tobe_packed = args
+        ReadWriteUtils.write(self.get_name(), format_str_list, values_tobe_packed)
+        ReadWriteUtils.write(self.get_data_type(), format_str_list, values_tobe_packed)
+        ReadWriteUtils.write(self.get_encoding(), format_str_list, values_tobe_packed)
+        ReadWriteUtils.write(
+            self.get_compression_type(), format_str_list, values_tobe_packed
+        )
diff --git a/client-py/iotdb/template/Template.py b/client-py/iotdb/template/Template.py
new file mode 100644
index 000000000000..38f883d03fb5
--- /dev/null
+++ b/client-py/iotdb/template/Template.py
@@ -0,0 +1,87 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import struct
+
+from .TemplateNode import TemplateNode
+from ..tsfile.common.constant.TsFileConstant import TsFileConstant
+from ..tsfile.utils.Pair import Pair
+from ..tsfile.utils.ReadWriteIOUtils import ReadWriteUtils
+
+
+class Template:  # schema template: a named tree of InternalNode/MeasurementNode children
+ def __init__(self, name, share_time: bool = False):
+ self.name = name  # template name
+ self.children = dict()  # child name -> TemplateNode
+ self.share_time = share_time  # True when the template root is time-aligned
+
+ def get_name(self) -> object:
+ return self.name
+
+ def is_share_time(self) -> object:
+ return self.share_time
+
+ def set_share_time(self, share_time: bool):
+ self.share_time = share_time
+
+ def add_template(self, child: TemplateNode):  # register a direct child; child names must be unique
+ if self.children.get(child.get_name()):
+ raise Exception("Duplicated child of node in template.")
+ self.children.update({child.get_name(): child})
+
+ def delete_from_template(self, name: str):  # remove a direct child by name; raises if absent
+ if not self.children.pop(name, []):
+ raise Exception("It is not a direct child of the template: " + name)
+
+ @property
+ def serialize(self):  # NOTE: accessed as an attribute (template.serialize), not called; returns the packed bytes
+ format_str_list = [">"]  # big-endian struct format accumulator
+ values_tobe_packed = []  # values matched 1:1 against the accumulated format
+ stack = []  # DFS stack of Pair(prefix, node)
+ aligned_prefix = set()  # prefixes whose measurements are time-aligned
+ ReadWriteUtils.write(self.get_name(), format_str_list, values_tobe_packed)
+ ReadWriteUtils.write(self.is_share_time(), format_str_list, values_tobe_packed)
+ if self.is_share_time():
+ aligned_prefix.add("")  # empty prefix marks root-level measurements as aligned
+
+ for child in self.children:
+ stack.append(Pair("", self.children[child]))
+
+ while stack:  # iterative depth-first traversal of the template tree
+ pair = stack.pop()
+ prefix = pair.left
+ cur_node = pair.right
+ full_path = [prefix]
+ if not cur_node.is_measurement():  # internal node: extend the prefix and push its children
+ if prefix != "":
+ full_path.append(TsFileConstant.PATH_SEPARATOR)
+ full_path.append(cur_node.get_name())
+ if cur_node.is_share_time():
+ aligned_prefix.add("".join(full_path))
+ for child in cur_node.children:
+ stack.append(Pair("".join(full_path), cur_node.children[child]))
+ else:  # measurement: write prefix, aligned flag, then the node's own schema
+ ReadWriteUtils.write(prefix, format_str_list, values_tobe_packed)
+ if prefix in aligned_prefix:
+ ReadWriteUtils.write(True, format_str_list, values_tobe_packed)
+ else:
+ ReadWriteUtils.write(False, format_str_list, values_tobe_packed)
+ cur_node.serialize(format_str_list, values_tobe_packed)
+
+ format_str = "".join(format_str_list)
+ return struct.pack(format_str, *values_tobe_packed)
diff --git a/client-py/iotdb/template/TemplateNode.py b/client-py/iotdb/template/TemplateNode.py
new file mode 100644
index 000000000000..6bab445394da
--- /dev/null
+++ b/client-py/iotdb/template/TemplateNode.py
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+class TemplateNode(object):
+ """
+ Base class for schema template nodes; subclasses override the hooks below.
+ """
+
+ def __init__(self, name):
+ self.name = name  # node name (path segment inside the template)
+
+ def get_name(self):
+ return self.name
+
+ def get_children(self):  # base nodes have no children; subclasses may override
+ return None
+
+ def add_child(self, node):  # no-op hook; overridden by internal-node subclasses
+ ...
+
+ def delete_child(self, node):  # no-op hook; overridden by internal-node subclasses
+ ...
+
+ def is_measurement(self):  # only MeasurementNode returns True
+ return False
+
+ def is_share_time(self):  # only time-aligned internal nodes return True
+ return False
+
+ def serialize(self, *args, **kwargs):  # no-op hook; overridden by concrete nodes
+ ...
diff --git a/client-py/iotdb/template/TemplateQueryType.py b/client-py/iotdb/template/TemplateQueryType.py
new file mode 100644
index 000000000000..370d1c0a3671
--- /dev/null
+++ b/client-py/iotdb/template/TemplateQueryType.py
@@ -0,0 +1,39 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+from enum import Enum, unique
+
+
+@unique
+class TemplateQueryType(Enum):  # wire codes for template query operations sent to the server
+ COUNT_MEASUREMENTS = 0
+ IS_MEASUREMENT = 1
+ PATH_EXIST = 2
+ SHOW_MEASUREMENTS = 3
+ SHOW_TEMPLATES = 4
+ SHOW_SET_TEMPLATES = 5
+ SHOW_USING_TEMPLATES = 6
+
+ # this method is implemented to avoid the issue reported by:
+ # https://bugs.python.org/issue30545
+ def __eq__(self, other) -> bool:  # NOTE(review): assumes `other` has a .value attribute — raises AttributeError for plain objects; confirm callers only compare enums
+ return self.value == other.value
+
+ def __hash__(self):  # keep hashability consistent with the custom __eq__
+ return self.value
diff --git a/client-py/iotdb/template/__init__.py b/client-py/iotdb/template/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/template/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/tsfile/__init__.py b/client-py/iotdb/tsfile/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/tsfile/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/tsfile/common/__init__.py b/client-py/iotdb/tsfile/common/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/tsfile/common/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/tsfile/common/constant/TsFileConstant.py b/client-py/iotdb/tsfile/common/constant/TsFileConstant.py
new file mode 100644
index 000000000000..0baad6a5fb87
--- /dev/null
+++ b/client-py/iotdb/tsfile/common/constant/TsFileConstant.py
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+class TsFileConstant:  # namespace of TsFile-related constants; mirrors the Java TsFileConstant class
+ TSFILE_SUFFIX = ".tsfile"
+ TSFILE_HOME = "TSFILE_HOME"
+ TSFILE_CONF = "TSFILE_CONF"
+ PATH_ROOT = "root"
+ TMP_SUFFIX = "tmp"
+ PATH_SEPARATOR = "."
+ PATH_SEPARATOR_CHAR = "."
+ PATH_SEPARATER_NO_REGEX = "\\."  # NOTE(review): "SEPARATER" spelling kept as-is for compatibility with the Java constant name
+ DOUBLE_QUOTE = '"'
+
+ TIME_COLUMN_MASK = 0x80  # bit mask flagging a time column
+
+ VALUE_COLUMN_MASK = 0x40  # bit mask flagging a value column
+
+ def __ts_file_constant(self):  # private placeholder; the class is used purely as a constant namespace
+ ...
diff --git a/client-py/iotdb/tsfile/common/constant/__init__.py b/client-py/iotdb/tsfile/common/constant/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/tsfile/common/constant/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/iotdb/tsfile/utils/Pair.py b/client-py/iotdb/tsfile/utils/Pair.py
new file mode 100644
index 000000000000..f3603fa91257
--- /dev/null
+++ b/client-py/iotdb/tsfile/utils/Pair.py
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+class Pair:  # simple two-field holder mirroring the Java Pair utility
+ def __init__(self, left: str, right):
+ self.__serialVersionUID = -1398609631703707002  # carried over from the Java class; unused in Python
+ self.left = left  # first element (used as a path prefix by Template.serialize)
+ self.right = right  # second element (an arbitrary payload, e.g. a TemplateNode)
diff --git a/client-py/iotdb/tsfile/utils/ReadWriteIOUtils.py b/client-py/iotdb/tsfile/utils/ReadWriteIOUtils.py
new file mode 100644
index 000000000000..6101906ced93
--- /dev/null
+++ b/client-py/iotdb/tsfile/utils/ReadWriteIOUtils.py
@@ -0,0 +1,77 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+
+
+class ReadWriteUtils:  # accumulates struct-pack format chars and values for binary serialization
+ BOOLEAN_LEN = 1
+ SHORT_LEN = 2
+ INT_LEN = 4
+ LONG_LEN = 8
+ DOUBLE_LEN = 8
+ FLOAT_LEN = 4
+ BIT_LEN = 0.125
+ NO_BYTE_TO_READ = -1  # sentinel length written for a missing (None) string
+ magicStringBytes = []
+ RETURN_ERROR = "Intend to read %d bytes but %d are actually returned"
+ URN_ERROR = "Intend to read %d bytes but %d are actually returned"  # NOTE(review): appears to be an accidental duplicate of RETURN_ERROR — confirm before removing
+
+ @classmethod
+ def write(cls, *args, **kwargs):  # dispatch on value type; expects (value, format_str_list, values_tobe_packed)
+ value, format_str_list, values_tobe_packed = args
+ if isinstance(value, bool):  # bool must be checked before int (bool is an int subclass)
+ cls.write_bool(value, format_str_list, values_tobe_packed)
+ elif isinstance(value, str):
+ cls.write_str(value, format_str_list, values_tobe_packed)
+ elif isinstance(value, int):
+ cls.write_int(value, format_str_list, values_tobe_packed)
+ elif isinstance(value, TSDataType):  # enums are serialized as their single-byte codes
+ cls.write_byte(value.value, format_str_list, values_tobe_packed)
+ elif isinstance(value, TSEncoding):
+ cls.write_byte(value.value, format_str_list, values_tobe_packed)
+ elif isinstance(value, Compressor):
+ cls.write_byte(value.value, format_str_list, values_tobe_packed)
+
+ @classmethod
+ def write_str(cls, s: str, format_str_list, values_tobe_packed):  # writes int length followed by UTF-8 bytes
+ if s is None:
+ cls.write_int(cls.NO_BYTE_TO_READ, format_str_list, values_tobe_packed)  # NOTE(review): likely missing an early return here — execution falls through and bytes(None, "utf-8") below would raise TypeError; confirm intended behavior
+
+ value_bytes = bytes(s, "utf-8")
+ format_str_list.append("i")
+ format_str_list.append(str(len(value_bytes)))
+ format_str_list.append("s")
+
+ values_tobe_packed.append(len(value_bytes))
+ values_tobe_packed.append(value_bytes)
+
+ @classmethod
+ def write_int(cls, i: int, format_str_list, values_tobe_packed):  # 4-byte signed int ("i")
+ format_str_list.append("i")
+ values_tobe_packed.append(i)
+
+ @classmethod
+ def write_bool(cls, flag: bool, format_str_list, values_tobe_packed):  # 1-byte bool ("?")
+ format_str_list.append("?")
+ values_tobe_packed.append(flag)
+
+ @classmethod
+ def write_byte(cls, b, format_str_list, values_tobe_packed):  # 1-byte signed char ("b")
+ format_str_list.append("b")
+ values_tobe_packed.append(b)
diff --git a/client-py/iotdb/tsfile/utils/__init__.py b/client-py/iotdb/tsfile/utils/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/tsfile/utils/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/client-py/requirements.txt b/client-py/requirements.txt
index 566c75f79c72..8715d50595b3 100644
--- a/client-py/requirements.txt
+++ b/client-py/requirements.txt
@@ -20,4 +20,7 @@
pandas~=1.3.5
# Testcontainer
testcontainers==3.3.0
-numpy~=1.21.4
\ No newline at end of file
+numpy~=1.21.4
+# SQLAlchemy Dialect
+sqlalchemy == 1.3.20
+sqlalchemy-utils == 0.36.8
\ No newline at end of file
diff --git a/client-py/setup.py b/client-py/setup.py
index 11e30cf40f49..a3f147af3cff 100644
--- a/client-py/setup.py
+++ b/client-py/setup.py
@@ -44,6 +44,8 @@
"pandas>=1.0.0,<1.99.99",
"numpy>=1.0.0",
"testcontainers>=2.0.0",
+ "sqlalchemy>=1.3.16, <1.4, !=1.3.21",
+ "sqlalchemy-utils>=0.37.8, <0.38",
],
classifiers=[
"Programming Language :: Python :: 3",
@@ -55,4 +57,9 @@
python_requires=">=3.7",
license="Apache License, Version 2.0",
website="https://iotdb.apache.org",
+ entry_points={
+ "sqlalchemy.dialects": [
+ "iotdb = iotdb.sqlalchemy.IoTDBDialect:IoTDBDialect",
+ ],
+ },
)
diff --git a/client-py/tests/test_dataframe.py b/client-py/tests/test_dataframe.py
index f95ade6e0895..c7cce58ea5b1 100644
--- a/client-py/tests/test_dataframe.py
+++ b/client-py/tests/test_dataframe.py
@@ -27,9 +27,10 @@ def test_simple_query():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.device0")
# Write data
- session.insert_str_record("root.device", 123, "pressure", "15.0")
+ session.insert_str_record("root.device0", 123, "pressure", "15.0")
# Read
session_data_set = session.execute_query_statement("SELECT ** FROM root")
@@ -37,7 +38,7 @@ def test_simple_query():
session.close()
- assert list(df.columns) == ["Time", "root.device.pressure"]
+ assert list(df.columns) == ["Time", "root.device0.pressure"]
assert_array_equal(df.values, [[123.0, 15.0]])
@@ -46,9 +47,10 @@ def test_non_time_query():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.device0")
# Write data
- session.insert_str_record("root.device", 123, "pressure", "15.0")
+ session.insert_str_record("root.device0", 123, "pressure", "15.0")
# Read
session_data_set = session.execute_query_statement("SHOW TIMESERIES")
@@ -70,9 +72,9 @@ def test_non_time_query():
df.values,
[
[
- "root.device.pressure",
+ "root.device0.pressure",
None,
- "root.device",
+ "root.device0",
"FLOAT",
"GORILLA",
"SNAPPY",
diff --git a/client-py/tests/test_one_device.py b/client-py/tests/test_one_device.py
index c364cd1105f8..d428947e9b8d 100644
--- a/client-py/tests/test_one_device.py
+++ b/client-py/tests/test_one_device.py
@@ -44,6 +44,7 @@ def test_one_device():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.str_test_01")
if not session.is_open():
print("can't open session")
diff --git a/client-py/tests/test_tablet.py b/client-py/tests/test_tablet.py
index fcb55133679a..1e80277d771b 100644
--- a/client-py/tests/test_tablet.py
+++ b/client-py/tests/test_tablet.py
@@ -30,6 +30,7 @@ def test_tablet_insertion():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.sg_test_01")
measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
data_types_ = [
@@ -72,6 +73,7 @@ def test_nullable_tablet_insertion():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.sg_test_01")
measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
data_types_ = [
diff --git a/client-py/tests/test_template.py b/client-py/tests/test_template.py
new file mode 100644
index 000000000000..1dd328fdb24c
--- /dev/null
+++ b/client-py/tests/test_template.py
@@ -0,0 +1,194 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from iotdb.IoTDBContainer import IoTDBContainer
+from iotdb.Session import Session
+from iotdb.template.InternalNode import InternalNode
+from iotdb.template.MeasurementNode import MeasurementNode
+from iotdb.template.Template import Template
+from iotdb.utils.IoTDBConstants import TSDataType, Compressor, TSEncoding
+
+
+def test_template_create():  # end-to-end test: build, query, prune and drop flat and tree-shaped templates
+ with IoTDBContainer("iotdb:dev") as db:  # disposable IoTDB server container for the test
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ measurement_template_name = "template_python"
+ template = Template(name=measurement_template_name, share_time=False)
+ m_node_1 = MeasurementNode(
+ name="s1",
+ data_type=TSDataType.INT64,
+ encoding=TSEncoding.RLE,
+ compression_type=Compressor.SNAPPY,
+ )
+ m_node_2 = MeasurementNode(
+ name="s2",
+ data_type=TSDataType.INT64,
+ encoding=TSEncoding.RLE,
+ compression_type=Compressor.SNAPPY,
+ )
+ m_node_3 = MeasurementNode(
+ name="s3",
+ data_type=TSDataType.INT64,
+ encoding=TSEncoding.RLE,
+ compression_type=Compressor.SNAPPY,
+ )
+ template.add_template(m_node_1)
+ template.add_template(m_node_2)
+ template.add_template(m_node_3)
+ session.create_schema_template(template)
+
+ assert session.show_measurements_in_template(measurement_template_name) == [  # order matches what the server returns, not insertion order
+ "s3",
+ "s1",
+ "s2",
+ ]
+ assert session.count_measurements_in_template(measurement_template_name) == 3
+ assert (
+ session.is_measurement_in_template(measurement_template_name, "s1") is True
+ )
+ assert (
+ session.is_path_exist_in_template(measurement_template_name, "s1") is True
+ )
+ assert (
+ session.is_path_exist_in_template(measurement_template_name, "s4") is False
+ )
+
+ session.delete_node_in_template(measurement_template_name, "s1")  # prune one node and re-verify counts/paths
+ assert session.show_measurements_in_template(measurement_template_name) == [
+ "s3",
+ "s2",
+ ]
+ assert session.count_measurements_in_template(measurement_template_name) == 2
+ assert (
+ session.is_path_exist_in_template(measurement_template_name, "s1") is False
+ )
+
+ tree_template_name = "treeTemplate_python"  # second template: nested internal nodes sharing one measurement node
+ template = Template(name=tree_template_name, share_time=True)
+ i_node_gps = InternalNode(name="GPS", share_time=False)
+ i_node_v = InternalNode(name="vehicle", share_time=True)
+ m_node_x = MeasurementNode(
+ "x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY
+ )
+
+ i_node_gps.add_child(m_node_x)
+ i_node_v.add_child(m_node_x)
+ template.add_template(i_node_gps)
+ template.add_template(i_node_v)
+ template.add_template(m_node_x)
+ session.create_schema_template(template)
+ assert session.show_measurements_in_template(tree_template_name) == [
+ "x",
+ "GPS.x",
+ "vehicle.x",
+ ]
+ assert session.count_measurements_in_template(tree_template_name) == 3
+
+ assert session.show_all_templates() == [
+ measurement_template_name,
+ tree_template_name,
+ ]
+ assert session.is_measurement_in_template(tree_template_name, "GPS") is False  # internal node, not a measurement
+ assert session.is_measurement_in_template(tree_template_name, "GPS.x") is True
+
+ session.drop_schema_template(measurement_template_name)  # cleanup
+ session.drop_schema_template(tree_template_name)
+
+ session.close()
+
+
+def test_add_measurements_template():  # tests appending aligned, unaligned-nested and top-level measurements to an existing template
+ with IoTDBContainer("iotdb:dev") as db:  # disposable IoTDB server container for the test
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ template_name = "add_template_python"
+ template = Template(name=template_name, share_time=False)
+ i_node_v = InternalNode(name="GPS", share_time=False)
+ i_node_gps_x = MeasurementNode(
+ "x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY
+ )
+
+ i_node_v.add_child(i_node_gps_x)
+ template.add_template(i_node_v)
+ session.create_schema_template(template)
+
+ # # append schema template
+ data_types = [TSDataType.FLOAT, TSDataType.FLOAT, TSDataType.DOUBLE]
+ encoding_list = [TSEncoding.RLE, TSEncoding.RLE, TSEncoding.GORILLA]
+ compressor_list = [Compressor.SNAPPY, Compressor.SNAPPY, Compressor.LZ4]
+
+ measurements_aligned_path = ["aligned.s1", "aligned.s2", "aligned.s3"]  # append under an "aligned" prefix, time-aligned
+ session.add_measurements_in_template(
+ template_name,
+ measurements_aligned_path,
+ data_types,
+ encoding_list,
+ compressor_list,
+ is_aligned=True,
+ )
+ # session.drop_schema_template("add_template_python")
+ measurements_aligned_path = ["unaligned.s1", "unaligned.s2", "unaligned.s3"]  # same schemas, non-aligned prefix
+ session.add_measurements_in_template(
+ template_name,
+ measurements_aligned_path,
+ data_types,
+ encoding_list,
+ compressor_list,
+ is_aligned=False,
+ )
+ measurements_aligned_path = ["s1", "s2", "s3"]  # and finally top-level, non-aligned measurements
+ session.add_measurements_in_template(
+ template_name,
+ measurements_aligned_path,
+ data_types,
+ encoding_list,
+ compressor_list,
+ is_aligned=False,
+ )
+
+ assert session.count_measurements_in_template(template_name) == 10  # 1 original + 3 + 3 + 3 appended
+ assert session.is_measurement_in_template(template_name, "GPS") is False
+ assert session.is_path_exist_in_template(template_name, "GPS.x") is True
+ assert session.is_path_exist_in_template(template_name, "x") is False
+
+ session.drop_schema_template(template_name)  # cleanup
+ session.close()
+
+
+def test_set_template():  # tests mounting/unmounting an (empty) template on a path
+ with IoTDBContainer("iotdb:dev") as db:  # disposable IoTDB server container for the test
+ db: IoTDBContainer
+ session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
+ session.open(False)
+
+ template_name = "set_template_python"
+ template = Template(name=template_name, share_time=False)
+ session.create_schema_template(template)
+
+ session.set_schema_template(template_name, "root.python.GPS")  # mount the template on the path
+
+ assert session.show_paths_template_set_on(template_name) == ["root.python.GPS"]
+ assert session.show_paths_template_using_on(template_name) == []  # set but not yet activated by any writes
+
+ session.unset_schema_template(template_name, "root.python.GPS")  # cleanup
+ session.drop_schema_template(template_name)
+ session.close()
diff --git a/client-py/tests/test_todf.py b/client-py/tests/test_todf.py
index feedcb3228ac..07953446cffa 100644
--- a/client-py/tests/test_todf.py
+++ b/client-py/tests/test_todf.py
@@ -69,6 +69,7 @@ def test_simple_query():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.wt1")
create_ts(session)
@@ -105,6 +106,7 @@ def test_with_null_query():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.wt1")
create_ts(session)
@@ -184,6 +186,7 @@ def test_multi_fetch():
db: IoTDBContainer
session = Session(db.get_container_host_ip(), db.get_exposed_port(6667))
session.open(False)
+ session.execute_non_query_statement("set storage group to root.wt1")
create_ts(session)
diff --git a/cluster/pom.xml b/cluster/pom.xml
deleted file mode 100644
index 61f5b652fbb3..000000000000
--- a/cluster/pom.xml
+++ /dev/null
@@ -1,228 +0,0 @@
-
-
-
- 4.0.0
-
- org.apache.iotdb
- iotdb-parent
- 0.14.0-SNAPSHOT
- ../pom.xml
-
- iotdb-cluster
- cluster
-
- false
- ${cluster.test.skip}
- ${cluster.test.skip}
-
-
-
-
- org.apache.thrift
- libthrift
- ${thrift.version}
-
-
- org.apache.iotdb
- service-rpc
- ${project.version}
-
-
- org.apache.thrift
- libthrift
-
-
-
-
- org.apache.iotdb
- iotdb-server
- ${project.version}
-
-
- org.apache.thrift
- libthrift
-
-
-
-
- commons-io
- commons-io
-
-
- org.apache.iotdb
- iotdb-thrift-cluster
- ${project.version}
-
-
- org.apache.thrift
- libthrift
-
-
- compile
-
-
- org.apache.iotdb
- iotdb-server
- ${project.version}
- test-jar
- test
-
-
- org.apache.iotdb
- iotdb-session
- ${project.version}
- compile
-
-
- org.apache.iotdb
- iotdb-jdbc
- ${project.version}
- compile
-
-
- commons-cli
- commons-cli
-
-
- org.awaitility
- awaitility
- ${awaitility.version}
- test
-
-
-
- org.powermock
- powermock-core
- test
-
-
- org.powermock
- powermock-module-junit4
- test
-
-
- org.powermock
- powermock-api-mockito2
- test
-
-
- org.apache.commons
- commons-pool2
-
-
-
-
- skipClusterTests
-
-
- skipTests
- true
-
-
-
- true
- true
- true
-
-
-
- skipUT_Cluster_Tests
-
-
- skipUTs
- true
-
-
-
- true
-
-
-
- only_test_Cluster
-
-
- cluster.test.only
- true
-
-
-
- false
- false
- false
-
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-surefire-plugin
-
- ${cluster.ut.skip}
- pertest
-
-
-
- org.apache.maven.plugins
- maven-failsafe-plugin
-
-
- run-integration-tests
- integration-test
-
- integration-test
- verify
-
-
-
-
- ${cluster.test.skip}
- ${cluster.it.skip}
-
-
-
- org.apache.maven.plugins
- maven-assembly-plugin
- ${maven.assembly.version}
-
-
-
- cluster-assembly
- package
-
- single
-
-
-
- src/assembly/cluster.xml
-
- false
-
-
-
-
-
-
-
diff --git a/cluster/src/assembly/cluster.xml b/cluster/src/assembly/cluster.xml
deleted file mode 100644
index 7025a3ed882f..000000000000
--- a/cluster/src/assembly/cluster.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-
- cluster
-
- dir
- zip
-
- false
-
-
- lib
-
-
-
-
- src/assembly/resources
- ${file.separator}
-
-
- ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf
- ${file.separator}conf
-
-
- ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/tools
- ${file.separator}tools
-
-
- ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf
- conf
-
-
-
diff --git a/cluster/src/assembly/resources/conf/iotdb-cluster.properties b/cluster/src/assembly/resources/conf/iotdb-cluster.properties
deleted file mode 100644
index 45de5102be67..000000000000
--- a/cluster/src/assembly/resources/conf/iotdb-cluster.properties
+++ /dev/null
@@ -1,200 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#-------------------------------------------IMPORTANT---------------------------------------------#
-# 1. Note that the system will automatically create a heartbeat port for each metadata service #
-# and data service. The default metadata heartbeat port is internal_meta_port + 1, #
-# The default data heartbeat port is internal_data_port + 1. #
-# So when you configure these two items and seed_nodes, pay attention to reserve a port for #
-# heartbeat service. #
-# 2. If open_server_rpc_port is set to true, the server module's RPC port will be turned on, #
-# and the server module's RPC port will be set to rpc_port (in iotdb-engines.properties) + 1, #
-# so this port should also be reserved. #
-#-------------------------------------------IMPORTANT---------------------------------------------#
-
-# used for communication between cluster nodes, eg heartbeat、raft logs and snapshots etc.
-# if this parameter is commented, then the IP that binded by the hostname will be used.
-internal_ip=127.0.0.1
-
-# port for metadata service
-internal_meta_port=9003
-
-# port for data service
-internal_data_port=40010
-
-# port for cluster info API, 6567 by default
-#cluster_info_public_port=6567
-
-# whether open port for server module (for debug purpose)
-# if true, the rpc_port of the single server will be changed to rpc_port (in iotdb-engines.properties) + 1
-# open_server_rpc_port=false
-
-# comma-separated {IP/DOMAIN}:internal_meta_port pairs, when used by start-datanode.sh(.bat),
-# this configuration means the nodes that will form the initial cluster,
-# every node that use start-datanode.sh(.bat) should have the same SEED_NODES, or the
-# building of the initial cluster will fail. WARNING: if the initial cluster is built, this
-# should not be changed before the environment is cleaned.
-# when used by add-node.sh(.bat), this means the nodes to which that the application of joining
-# the cluster will be sent, as all nodes can respond to a request, this configuration can be any
-# nodes that already in the cluster, unnecessary to be the nodes that were used to build the
-# initial cluster by start-datanode.sh(.bat). Several nodes will be picked randomly to send the
-# request, the number of nodes picked depends on the number of retries.
-#seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007
-seed_nodes=127.0.0.1:9003
-
-# whether to use thrift compressed protocol for internal communications. If you want to change
-# compression settings for external clients, please modify 'rpc_thrift_compression_enable' in
-# 'iotdb-engine.properties'.
-# WARNING: this must be consistent across all nodes in the cluster
-# rpc_thrift_compression_enable=false
-
-# number of replications for one partition
-default_replica_num=1
-
-# sub raft num for multi-raft
-multi_raft_factor=1
-
-# cluster name to identify different clusters
-# all node's cluster_name in one cluster are the same
-# cluster_name=default
-
-# Thrift socket and connection timeout between raft nodes, in milliseconds.
-# NOTE: the timeout of connection used for sending heartbeats and requesting votes
-# will be adjusted to min(heartbeat_interval_ms, connection_timeout_ms).
-# connection_timeout_ms=20000
-
-# write operation timeout threshold (ms), this is only for internal communications,
-# not for the whole operation.
-# write_operation_timeout_ms=30000
-
-# read operation timeout threshold (ms), this is only for internal communications,
-# not for the whole operation.
-# read_operation_timeout_ms=30000
-
-# the time interval (ms) between two rounds of heartbeat broadcast of one raft group leader.
-# Recommend to set it as 1/10 of election_timeout_ms, but larger than 1 RTT between each two nodes.
-# heartbeat_interval_ms=1000
-
-# The election timeout in follower, or the time waiting for requesting votes in elector, in milliseconds.
-# election_timeout_ms=20000
-
-# catch up timeout threshold (ms), this is used for a follower behind the leader too much,
-# so the leader will send logs(snapshot) to the follower,
-# NOTICE, it may cost minutes of time to send a snapshot,
-# so this parameter should be larger than the snapshot cost time.
-# catch_up_timeout_ms=300000
-
-# whether to use batch append entries in log catch up
-# use_batch_in_catch_up=true
-
-# the minimum number of committed logs in memory, after each log deletion, at most such number of logs
-# will remain in memory. Increasing the number will reduce the chance to use snapshot in catch-ups,
-# but will also increase the memory footprint
-# min_num_of_logs_in_mem=1000
-
-# maximum number of committed logs in memory, when reached, a log deletion will be triggered.
-# Increasing the number will reduce the chance to use snapshot in catch-ups, but will also increase
-# memory footprint
-# max_num_of_logs_in_mem=2000
-
-# Ratio of write memory allocated for raft log, 0.2 by default
-# Increasing the number will reduce the memory allocated for write process in iotdb, but will also
-# increase the memory footprint for raft log, which reduces the chance to use snapshot in catch-ups
-# raft_log_memory_proportion=0.2
-
-# deletion check period of the submitted log
-# log_deletion_check_interval_second=-1
-
-# Whether creating schema automatically is enabled, this will replace the one in iotdb-engine.properties
-# enable_auto_create_schema=true
-
-# consistency level, now three consistency levels are supported: strong, mid, and weak.
-# Strong consistency means the server will first try to synchronize with the leader to get the
-# newest data, if failed(timeout), directly report an error to the user;
-# While mid consistency means the server will first try to synchronize with the leader,
-# but if failed(timeout), it will give up and just use current data it has cached before;
-# Weak consistency does not synchronize with the leader and simply use the local data
-# consistency_level=mid
-
-# Whether to use asynchronous server
-# is_use_async_server=false
-
-# Whether to use asynchronous applier
-# is_use_async_applier=true
-
-# is raft log persistence enabled
-# is_enable_raft_log_persistence=true
-
-# When a certain amount of raft log is reached, it will be flushed to disk
-# It is possible to lose at most flush_raft_log_threshold operations
-# flush_raft_log_threshold=10000
-
-# Size of log buffer in each RaftMember's LogManager(in byte).
-# raft_log_buffer_size=16777216
-
-# The maximum value of the raft log index stored in the memory per raft group,
-# These indexes are used to index the location of the log on the disk
-# max_raft_log_index_size_in_memory=10000
-
-# If leader finds too many uncommitted raft logs, raft group leader will wait for a short period of
-# time, and then append the raft log
-# uncommitted_raft_log_num_for_reject_threshold=500
-
-# If followers find too many committed raft logs have not been applied, followers will reject the raft
-# log sent by leader
-# unapplied_raft_log_num_for_reject_threshold=500
-
-# The maximum size of the raft log saved on disk for each file (in bytes) of each raft group.
-# The default size is 1GB
-# max_raft_log_persist_data_size_per_file=1073741824
-
-# The maximum number of persistent raft log files on disk per raft group,
-# So each raft group's log takes up disk space approximately equals
-# max_raft_log_persist_data_size_per_file*max_number_of_persist_raft_log_files
-# max_number_of_persist_raft_log_files=5
-
-# The maximum number of logs saved on the disk
-# max_persist_raft_log_number_on_disk=1000000
-
-# whether enable use persist log on disk to catch up when no logs found in memory, if set false,
-# will use snapshot to catch up when no logs found in memory.
-# enable_use_persist_log_on_disk_to_catch_up=false
-
-# The number of logs read on the disk at one time, which is mainly used to control the memory usage.
-# This value multiplied by the log size is about the amount of memory used to read logs from the disk at one time.
-# max_number_of_logs_per_fetch_on_disk=1000
-
-# When consistency level is set to mid, query will fail if the log lag exceeds max_read_log_lag
-# This default value is 1000
-# max_read_log_lag=1000
-
-# When a follower tries to sync log with the leader, sync will fail if the log Lag exceeds max_sync_log_lag.
-# This default value is 100000
-# max_sync_log_lag=100000
-
-# Max number of clients in a ClientPool of a member for one node. When the num of clients in the ClientPool exceeds this parameter, the ClientPool blocks the thread that obtains the client for waitClientTimeoutMS.
-# max_client_pernode_permember_number=1000
-
-# Max number of idle clients in a ClientPool of a member for one node. When the num of clients in the ClientPool exceeds this parameter, the ClientPool destroys the client when it returns.
-# max_idle_client_pernode_permember_number=500
-
-# If the number of connections created for a node exceeds `max_client_pernode_permember_number`,
-# we need to wait so much time for other connections to be released until timeout,
-# or a new connection will be created.
-# wait_client_timeout_ms=5000
diff --git a/cluster/src/assembly/resources/sbin/add-node.bat b/cluster/src/assembly/resources/sbin/add-node.bat
deleted file mode 100755
index d272cf90b9cf..000000000000
--- a/cluster/src/assembly/resources/sbin/add-node.bat
+++ /dev/null
@@ -1,133 +0,0 @@
-@REM
-@REM Licensed to the Apache Software Foundation (ASF) under one
-@REM or more contributor license agreements. See the NOTICE file
-@REM distributed with this work for additional information
-@REM regarding copyright ownership. The ASF licenses this file
-@REM to you under the Apache License, Version 2.0 (the
-@REM "License"); you may not use this file except in compliance
-@REM with the License. You may obtain a copy of the License at
-@REM
-@REM http://www.apache.org/licenses/LICENSE-2.0
-@REM
-@REM Unless required by applicable law or agreed to in writing,
-@REM software distributed under the License is distributed on an
-@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-@REM KIND, either express or implied. See the License for the
-@REM specific language governing permissions and limitations
-@REM under the License.
-@REM
-
-@echo off
-echo ````````````````````````
-echo Starting IoTDB (Cluster Mode)
-echo ````````````````````````
-
-PATH %PATH%;%JAVA_HOME%\bin\
-set "FULL_VERSION="
-set "MAJOR_VERSION="
-set "MINOR_VERSION="
-
-
-for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do (
- set "FULL_VERSION=%%j-%%k-%%l-%%m"
- IF "%%j" == "1" (
- set "MAJOR_VERSION=%%k"
- set "MINOR_VERSION=%%l"
- ) else (
- set "MAJOR_VERSION=%%j"
- set "MINOR_VERSION=%%k"
- )
-)
-
-set JAVA_VERSION=%MAJOR_VERSION%
-
-IF NOT %JAVA_VERSION% == 8 (
- IF NOT %JAVA_VERSION% == 11 (
- echo IoTDB only supports jdk8 or jdk11, please check your java version.
- goto finally
- )
-)
-
-if "%OS%" == "Windows_NT" setlocal
-
-pushd %~dp0..
-if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd%
-popd
-
-SET enable_printgc=false
-IF "%1" == "printgc" (
- SET enable_printgc=true
- SHIFT
-)
-
-SET IOTDB_CONF=%1
-IF "%IOTDB_CONF%" == "" (
- SET IOTDB_CONF=%IOTDB_HOME%\conf
-) ELSE (
- SET IOTDB_CONF="%IOTDB_CONF%"
-)
-
-SET IOTDB_LOGS=%IOTDB_HOME%\logs
-
-IF EXIST "%IOTDB_CONF%\iotdb-env.bat" (
- IF "%enable_printgc%" == "true" (
- CALL "%IOTDB_CONF%\iotdb-env.bat" printgc
- ) ELSE (
- CALL "%IOTDB_CONF%\iotdb-env.bat"
- )
-) ELSE IF EXIST "%IOTDB_HOME%/conf/iotdb-env.bat" (
- IF "%enable_printgc%" == "true" (
- CALL "%IOTDB_HOME%/conf/iotdb-env.bat" printgc
- ) ELSE (
- CALL "%IOTDB_HOME%/conf/iotdb-env.bat"
- )
-) ELSE (
- echo "can't find iotdb-env.bat"
-)
-
-@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS
-set CONF_PARAMS=-a
-if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.ClusterIoTDB
-if NOT DEFINED JAVA_HOME goto :err
-
-@REM -----------------------------------------------------------------------------
-@REM JVM Opts we'll use in legacy run or installation
-set JAVA_OPTS=-ea^
- -Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^
- -DIOTDB_HOME="%IOTDB_HOME%"^
- -DTSFILE_HOME="%IOTDB_HOME%"^
- -DTSFILE_CONF="%IOTDB_CONF%"^
- -DIOTDB_CONF="%IOTDB_CONF%"
-
-@REM ***** CLASSPATH library setting *****
-@REM Ensure that any user defined CLASSPATH variables are not used on startup
-set CLASSPATH="%IOTDB_HOME%\lib"
-
-@REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable.
-set CLASSPATH=%CLASSPATH%;"%IOTDB_HOME%\lib\*"
-set CLASSPATH=%CLASSPATH%;iotdb.IoTDB
-goto okClasspath
-
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
-
-@REM -----------------------------------------------------------------------------
-:okClasspath
-
-rem echo CLASSPATH: %CLASSPATH%
-
-"%JAVA_HOME%\bin\java" %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS%
-goto finally
-
-:err
-echo JAVA_HOME environment variable must be set!
-pause
-
-
-@REM -----------------------------------------------------------------------------
-:finally
-
-pause
-
-ENDLOCAL
\ No newline at end of file
diff --git a/cluster/src/assembly/resources/sbin/add-node.sh b/cluster/src/assembly/resources/sbin/add-node.sh
deleted file mode 100755
index 321299902b7c..000000000000
--- a/cluster/src/assembly/resources/sbin/add-node.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-echo ---------------------
-echo "Starting IoTDB (Cluster Mode)"
-echo ---------------------
-
-if [ -z "${IOTDB_HOME}" ]; then
- export IOTDB_HOME="`dirname "$0"`/.."
-fi
-
-enable_printgc=false
-if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
- enable_printgc=true;
- shift
-fi
-
-IOTDB_CONF=$1
-if [ -z "${IOTDB_CONF}" ]; then
- export IOTDB_CONF=${IOTDB_HOME}/conf
-fi
-
-if [ -f "$IOTDB_CONF/iotdb-env.sh" ]; then
- if [ $enable_printgc == "true" ]; then
- . "$IOTDB_CONF/iotdb-env.sh" "printgc"
- else
- . "$IOTDB_CONF/iotdb-env.sh"
- fi
-elif [ -f "${IOTDB_HOME}/conf/iotdb-env.sh" ]; then
- if [ $enable_printgc == "true" ]; then
- . "${IOTDB_HOME}/conf/iotdb-env.sh" "printgc"
- else
- . "${IOTDB_HOME}/conf/iotdb-env.sh"
- fi
-else
- echo "can't find $IOTDB_CONF/iotdb-env.sh"
-fi
-
-
-if [ -n "$JAVA_HOME" ]; then
- for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
- if [ -x "$java" ]; then
- JAVA="$java"
- break
- fi
- done
-else
- JAVA=java
-fi
-
-if [ -z $JAVA ] ; then
- echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr
- exit 1;
-fi
-
-CLASSPATH=""
-for f in ${IOTDB_HOME}/lib/*.jar; do
- CLASSPATH=${CLASSPATH}":"$f
-done
-classname=org.apache.iotdb.cluster.ClusterIoTDB
-
-launch_service()
-{
- class="$1"
- iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml"
- iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}"
- iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}"
- iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}"
- iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB"
- exec "$JAVA" $illegal_access_params $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" -a
- return $?
-}
-
-# Start up the service
-launch_service "$classname"
-
-exit $?
diff --git a/cluster/src/assembly/resources/sbin/nodetool.bat b/cluster/src/assembly/resources/sbin/nodetool.bat
deleted file mode 100755
index 7dd9111dbc8c..000000000000
--- a/cluster/src/assembly/resources/sbin/nodetool.bat
+++ /dev/null
@@ -1,58 +0,0 @@
-@REM
-@REM Licensed to the Apache Software Foundation (ASF) under one
-@REM or more contributor license agreements. See the NOTICE file
-@REM distributed with this work for additional information
-@REM regarding copyright ownership. The ASF licenses this file
-@REM to you under the Apache License, Version 2.0 (the
-@REM "License"); you may not use this file except in compliance
-@REM with the License. You may obtain a copy of the License at
-@REM
-@REM http://www.apache.org/licenses/LICENSE-2.0
-@REM
-@REM Unless required by applicable law or agreed to in writing,
-@REM software distributed under the License is distributed on an
-@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-@REM KIND, either express or implied. See the License for the
-@REM specific language governing permissions and limitations
-@REM under the License.
-@REM
-
-if "%OS%" == "Windows_NT" setlocal
-
-pushd %~dp0..
-if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD%
-popd
-
-if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.utils.nodetool.NodeTool
-if NOT DEFINED JAVA_HOME goto :err
-
-@REM -----------------------------------------------------------------------------
-@REM JVM Opts we'll use in legacy run or installation
-set JAVA_OPTS=-ea^
- -DIOTDB_HOME=%IOTDB_HOME%
-
-REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable.
-for %%i in ("%IOTDB_HOME%\lib\*.jar") do call :append "%%i"
-goto okClasspath
-
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
-
-REM -----------------------------------------------------------------------------
-:okClasspath
-
-"%JAVA_HOME%\bin\java" %JAVA_OPTS% -cp "%CLASSPATH%" %MAIN_CLASS% %*
-
-goto finally
-
-
-:err
-echo JAVA_HOME environment variable must be set!
-pause
-
-
-@REM -----------------------------------------------------------------------------
-:finally
-
-ENDLOCAL
\ No newline at end of file
diff --git a/cluster/src/assembly/resources/sbin/nodetool.sh b/cluster/src/assembly/resources/sbin/nodetool.sh
deleted file mode 100755
index ab005a722a00..000000000000
--- a/cluster/src/assembly/resources/sbin/nodetool.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-if [ -z "${IOTDB_HOME}" ]; then
- export IOTDB_HOME="$(cd "`dirname "$0"`"/..; pwd)"
-fi
-
-
-MAIN_CLASS=org.apache.iotdb.cluster.utils.nodetool.NodeTool
-
-
-CLASSPATH=""
-for f in ${IOTDB_HOME}/lib/*.jar; do
- CLASSPATH=${CLASSPATH}":"$f
-done
-
-
-if [ -n "$JAVA_HOME" ]; then
- for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
- if [ -x "$java" ]; then
- JAVA="$java"
- break
- fi
- done
-else
- JAVA=java
-fi
-
-exec "$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" "$@"
-
-exit $?
diff --git a/cluster/src/assembly/resources/sbin/remove-node.bat b/cluster/src/assembly/resources/sbin/remove-node.bat
deleted file mode 100755
index fc3202b711ce..000000000000
--- a/cluster/src/assembly/resources/sbin/remove-node.bat
+++ /dev/null
@@ -1,110 +0,0 @@
-@REM
-@REM Licensed to the Apache Software Foundation (ASF) under one
-@REM or more contributor license agreements. See the NOTICE file
-@REM distributed with this work for additional information
-@REM regarding copyright ownership. The ASF licenses this file
-@REM to you under the Apache License, Version 2.0 (the
-@REM "License"); you may not use this file except in compliance
-@REM with the License. You may obtain a copy of the License at
-@REM
-@REM http://www.apache.org/licenses/LICENSE-2.0
-@REM
-@REM Unless required by applicable law or agreed to in writing,
-@REM software distributed under the License is distributed on an
-@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-@REM KIND, either express or implied. See the License for the
-@REM specific language governing permissions and limitations
-@REM under the License.
-@REM
-
-@echo off
-echo ````````````````````````
-echo Starting to remove a node (Cluster Mode)
-echo ````````````````````````
-
-PATH %PATH%;%JAVA_HOME%\bin\
-set "FULL_VERSION="
-set "MAJOR_VERSION="
-set "MINOR_VERSION="
-
-
-for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do (
- set "FULL_VERSION=%%j-%%k-%%l-%%m"
- IF "%%j" == "1" (
- set "MAJOR_VERSION=%%k"
- set "MINOR_VERSION=%%l"
- ) else (
- set "MAJOR_VERSION=%%j"
- set "MINOR_VERSION=%%k"
- )
-)
-
-set JAVA_VERSION=%MAJOR_VERSION%
-
-IF NOT %JAVA_VERSION% == 8 (
- IF NOT %JAVA_VERSION% == 11 (
- echo IoTDB only supports jdk8 or jdk11, please check your java version.
- goto finally
- )
-)
-
-if "%OS%" == "Windows_NT" setlocal
-
-pushd %~dp0..
-if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd%
-popd
-
-set IOTDB_CONF=%IOTDB_HOME%\conf
-set IOTDB_LOGS=%IOTDB_HOME%\logs
-
-@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS
-set CONF_PARAMS=-r
-set is_conf_path=false
-for %%i in (%*) do (
- set CONF_PARAMS=!CONF_PARAMS! %%i
-)
-
-if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.ClusterIoTDB
-if NOT DEFINED JAVA_HOME goto :err
-
-@REM -----------------------------------------------------------------------------
-@REM JVM Opts we'll use in legacy run or installation
-set JAVA_OPTS=-ea^
- -Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^
- -DIOTDB_HOME="%IOTDB_HOME%"^
- -DTSFILE_HOME="%IOTDB_HOME%"^
- -DCLUSTER_CONF="%IOTDB_CONF%"^
- -DIOTDB_CONF="%IOTDB_CONF%"
-
-@REM ***** CLASSPATH library setting *****
-@REM Ensure that any user defined CLASSPATH variables are not used on startup
-set CLASSPATH="%IOTDB_HOME%\lib"
-
-@REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable.
-set CLASSPATH=%CLASSPATH%;"%IOTDB_HOME%\lib\*"
-set CLASSPATH=%CLASSPATH%;iotdb.ClusterIoTDB
-goto okClasspath
-
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
-
-@REM -----------------------------------------------------------------------------
-:okClasspath
-
-rem echo CLASSPATH: %CLASSPATH%
-
-"%JAVA_HOME%\bin\java" %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS%
-goto finally
-
-:err
-echo JAVA_HOME environment variable must be set!
-pause
-
-
-@REM -----------------------------------------------------------------------------
-:finally
-
-pause
-
-ENDLOCAL
\ No newline at end of file
diff --git a/cluster/src/assembly/resources/sbin/remove-node.sh b/cluster/src/assembly/resources/sbin/remove-node.sh
deleted file mode 100755
index 0cafad153564..000000000000
--- a/cluster/src/assembly/resources/sbin/remove-node.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-echo ---------------------
-echo "Starting to remove a node(Cluster Mode)"
-echo ---------------------
-
-if [ -z "${IOTDB_HOME}" ]; then
- export IOTDB_HOME="`dirname "$0"`/.."
-fi
-
-IOTDB_CONF=${IOTDB_HOME}/conf
-
-CONF_PARAMS="-r "$*
-
-if [ -n "$JAVA_HOME" ]; then
- for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
- if [ -x "$java" ]; then
- JAVA="$java"
- break
- fi
- done
-else
- JAVA=java
-fi
-
-if [ -z $JAVA ] ; then
- echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr
- exit 1;
-fi
-
-CLASSPATH=""
-for f in ${IOTDB_HOME}/lib/*.jar; do
- CLASSPATH=${CLASSPATH}":"$f
-done
-classname=org.apache.iotdb.cluster.ClusterIoTDB
-
-launch_service()
-{
- class="$1"
- iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml"
- iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}"
- iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}"
- iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}"
- iotdb_parms="$iotdb_parms -DCLUSTER_CONF=${IOTDB_CONF}"
- iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB"
- exec "$JAVA" $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" $CONF_PARAMS
- return $?
-}
-
-# Start up the service
-launch_service "$classname"
-
-exit $?
diff --git a/cluster/src/assembly/resources/sbin/start-node.bat b/cluster/src/assembly/resources/sbin/start-node.bat
deleted file mode 100755
index a16e79b016a4..000000000000
--- a/cluster/src/assembly/resources/sbin/start-node.bat
+++ /dev/null
@@ -1,136 +0,0 @@
-@REM
-@REM Licensed to the Apache Software Foundation (ASF) under one
-@REM or more contributor license agreements. See the NOTICE file
-@REM distributed with this work for additional information
-@REM regarding copyright ownership. The ASF licenses this file
-@REM to you under the Apache License, Version 2.0 (the
-@REM "License"); you may not use this file except in compliance
-@REM with the License. You may obtain a copy of the License at
-@REM
-@REM http://www.apache.org/licenses/LICENSE-2.0
-@REM
-@REM Unless required by applicable law or agreed to in writing,
-@REM software distributed under the License is distributed on an
-@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-@REM KIND, either express or implied. See the License for the
-@REM specific language governing permissions and limitations
-@REM under the License.
-@REM
-
-@echo off
-echo ````````````````````````
-echo Starting IoTDB (Cluster Mode)
-echo ````````````````````````
-
-PATH %PATH%;%JAVA_HOME%\bin\
-set "FULL_VERSION="
-set "MAJOR_VERSION="
-set "MINOR_VERSION="
-
-
-for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do (
- set "FULL_VERSION=%%j-%%k-%%l-%%m"
- IF "%%j" == "1" (
- set "MAJOR_VERSION=%%k"
- set "MINOR_VERSION=%%l"
- ) else (
- set "MAJOR_VERSION=%%j"
- set "MINOR_VERSION=%%k"
- )
-)
-
-set JAVA_VERSION=%MAJOR_VERSION%
-
-@REM we do not check jdk that version less than 1.8 because they are too stale...
-IF "%JAVA_VERSION%" == "6" (
- echo IoTDB only supports jdk >= 8, please check your java version.
- goto finally
-)
-IF "%JAVA_VERSION%" == "7" (
- echo IoTDB only supports jdk >= 8, please check your java version.
- goto finally
-)
-
-
-if "%OS%" == "Windows_NT" setlocal
-
-pushd %~dp0..
-if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd%
-popd
-
-SET enable_printgc=false
-IF "%1" == "printgc" (
- SET enable_printgc=true
- SHIFT
-)
-
-SET IOTDB_CONF=%1
-IF "%IOTDB_CONF%" == "" (
- SET IOTDB_CONF=%IOTDB_HOME%\conf
-) ELSE (
- SET IOTDB_CONF="%IOTDB_CONF%"
-)
-
-SET IOTDB_LOGS=%IOTDB_HOME%\logs
-
-IF EXIST "%IOTDB_CONF%\iotdb-env.bat" (
- IF "%enable_printgc%" == "true" (
- CALL "%IOTDB_CONF%\iotdb-env.bat" printgc
- ) ELSE (
- CALL "%IOTDB_CONF%\iotdb-env.bat"
- )
-) ELSE IF EXIST "%IOTDB_HOME%/conf/iotdb-env.bat" (
- IF "%enable_printgc%" == "true" (
- CALL "%IOTDB_HOME%/conf/iotdb-env.bat" printgc
- ) ELSE (
- CALL "%IOTDB_HOME%/conf/iotdb-env.bat"
- )
-) ELSE (
- echo "can't find iotdb-env.bat"
-)
-
-@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS
-set CONF_PARAMS=-s
-if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.ClusterIoTDB
-if NOT DEFINED JAVA_HOME goto :err
-
-@REM -----------------------------------------------------------------------------
-@REM JVM Opts we'll use in legacy run or installation
-set JAVA_OPTS=-ea^
- -Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^
- -DIOTDB_HOME="%IOTDB_HOME%"^
- -DTSFILE_HOME="%IOTDB_HOME%"^
- -DTSFILE_CONF="%IOTDB_CONF%"^
- -DIOTDB_CONF="%IOTDB_CONF%"
-
-@REM ***** CLASSPATH library setting *****
-@REM Ensure that any user defined CLASSPATH variables are not used on startup
-set CLASSPATH="%IOTDB_HOME%\lib\*"
-
-@REM this special suffix 'iotdb.ClusterIoTDB' is mandatory as stop-node.bat uses it to filter the process id.
-set CLASSPATH=%CLASSPATH%;iotdb.ClusterIoTDB
-goto okClasspath
-
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
-
-@REM -----------------------------------------------------------------------------
-:okClasspath
-
-rem echo CLASSPATH: %CLASSPATH%
-
-"%JAVA_HOME%\bin\java" %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS%
-goto finally
-
-:err
-echo JAVA_HOME environment variable must be set!
-pause
-
-
-@REM -----------------------------------------------------------------------------
-:finally
-
-pause
-
-ENDLOCAL
diff --git a/cluster/src/assembly/resources/sbin/start-node.sh b/cluster/src/assembly/resources/sbin/start-node.sh
deleted file mode 100755
index 31a84f58a135..000000000000
--- a/cluster/src/assembly/resources/sbin/start-node.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-echo ---------------------
-echo "Starting IoTDB (Cluster Mode)"
-echo ---------------------
-
-if [ -z "${IOTDB_HOME}" ]; then
- export IOTDB_HOME="`dirname "$0"`/.."
-fi
-
-enable_printgc=false
-if [ "$#" -ge "1" -a "$1" == "printgc" ]; then
- enable_printgc=true;
- shift
-fi
-
-IOTDB_CONF=$1
-if [ -z "${IOTDB_CONF}" ]; then
- export IOTDB_CONF=${IOTDB_HOME}/conf
-fi
-
-if [ -f "$IOTDB_CONF/iotdb-env.sh" ]; then
- if [ $enable_printgc == "true" ]; then
- . "$IOTDB_CONF/iotdb-env.sh" "printgc"
- else
- . "$IOTDB_CONF/iotdb-env.sh"
- fi
-elif [ -f "${IOTDB_HOME}/conf/iotdb-env.sh" ]; then
- if [ $enable_printgc == "true" ]; then
- . "${IOTDB_HOME}/conf/iotdb-env.sh" "printgc"
- else
- . "${IOTDB_HOME}/conf/iotdb-env.sh"
- fi
-else
- echo "can't find $IOTDB_CONF/iotdb-env.sh"
-fi
-
-CONF_PARAMS="-s "$*
-
-if [ -n "$JAVA_HOME" ]; then
- for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do
- if [ -x "$java" ]; then
- JAVA="$java"
- break
- fi
- done
-else
- JAVA=java
-fi
-
-if [ -z $JAVA ] ; then
- echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr
- exit 1;
-fi
-
-CLASSPATH=""
-for f in ${IOTDB_HOME}/lib/*.jar; do
- CLASSPATH=${CLASSPATH}":"$f
-done
-classname=org.apache.iotdb.cluster.ClusterIoTDB
-
-launch_service()
-{
- class="$1"
- iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml"
- iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}"
- iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}"
- iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}"
- iotdb_parms="$iotdb_parms -DTSFILE_CONF=${IOTDB_CONF}"
- iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB"
- exec "$JAVA" $illegal_access_params $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" $CONF_PARAMS
- return $?
-}
-
-# Start up the service
-launch_service "$classname"
-
-exit $?
diff --git a/cluster/src/assembly/resources/sbin/stop-node.bat b/cluster/src/assembly/resources/sbin/stop-node.bat
deleted file mode 100644
index 29c54c9e944c..000000000000
--- a/cluster/src/assembly/resources/sbin/stop-node.bat
+++ /dev/null
@@ -1,27 +0,0 @@
-@REM
-@REM Licensed to the Apache Software Foundation (ASF) under one
-@REM or more contributor license agreements. See the NOTICE file
-@REM distributed with this work for additional information
-@REM regarding copyright ownership. The ASF licenses this file
-@REM to you under the Apache License, Version 2.0 (the
-@REM "License"); you may not use this file except in compliance
-@REM with the License. You may obtain a copy of the License at
-@REM
-@REM http://www.apache.org/licenses/LICENSE-2.0
-@REM
-@REM Unless required by applicable law or agreed to in writing,
-@REM software distributed under the License is distributed on an
-@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-@REM KIND, either express or implied. See the License for the
-@REM specific language governing permissions and limitations
-@REM under the License.
-@REM
-
-@echo off
-
-pushd..
-set exec_dir=%cd%
-popd
-set exec_dir=%exec_dir:\=\\%
-wmic process where (commandline like "%%iotdb.ClusterIoTDB%%" and not name="wmic.exe" and commandline like "%%%exec_dir%%%") delete
-
diff --git a/cluster/src/assembly/resources/sbin/stop-node.sh b/cluster/src/assembly/resources/sbin/stop-node.sh
deleted file mode 100644
index 4d670c495432..000000000000
--- a/cluster/src/assembly/resources/sbin/stop-node.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-PIDS=$(ps ax | grep -i 'ClusterIoTDB' | grep java | grep -v grep | awk '{print $1}')
-sig=0
-for every_pid in ${PIDS}
-do
- cwd_path=$(ls -l /proc/$every_pid | grep "cwd ->" | grep -v grep | awk '{print $NF}')
- pwd_path=$(/bin/pwd)
- if [[ $pwd_path =~ $cwd_path ]]; then
- kill -s TERM $every_pid
- echo "close IoTDB"
- sig=1
- fi
-done
-
-if [ $sig -eq 0 ]; then
- echo "No IoTDB server to stop"
- exit 1
-fi
-
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java
deleted file mode 100644
index a8003e2a4c50..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java
+++ /dev/null
@@ -1,469 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster;
-
-import org.apache.iotdb.common.rpc.thrift.TSStatus;
-import org.apache.iotdb.commons.conf.IoTDBConstant;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.jdbc.Config;
-import org.apache.iotdb.rpc.IoTDBConnectionException;
-import org.apache.iotdb.rpc.RpcTransportFactory;
-import org.apache.iotdb.rpc.StatementExecutionException;
-import org.apache.iotdb.rpc.TSStatusCode;
-import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq;
-import org.apache.iotdb.service.rpc.thrift.TSCloseSessionReq;
-import org.apache.iotdb.service.rpc.thrift.TSCreateTimeseriesReq;
-import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementReq;
-import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp;
-import org.apache.iotdb.service.rpc.thrift.TSIService;
-import org.apache.iotdb.service.rpc.thrift.TSIService.Client;
-import org.apache.iotdb.service.rpc.thrift.TSIService.Client.Factory;
-import org.apache.iotdb.service.rpc.thrift.TSInsertStringRecordReq;
-import org.apache.iotdb.service.rpc.thrift.TSOpenSessionReq;
-import org.apache.iotdb.service.rpc.thrift.TSOpenSessionResp;
-import org.apache.iotdb.service.rpc.thrift.TSProtocolVersion;
-import org.apache.iotdb.session.SessionDataSet;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema;
-import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.DefaultParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.time.ZoneId;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-@SuppressWarnings("java:S106")
-public class ClientMain {
-
- private static final Logger logger = LoggerFactory.getLogger(ClientMain.class);
-
- private static final String PARAM_INSERTION = "i";
- private static final String PARAM_QUERY = "q";
- private static final String PARAM_DELETE_STORAGE_GROUP = "dsg";
- private static final String PARAM_DELETE_SERIES = "ds";
- private static final String PARAM_QUERY_PORTS = "qp";
- private static final String PARAM_INSERT_PORT = "ip";
- private static final String PARAM_BATCH = "b";
- private static Options options = new Options();
-
- private static String ip = "127.0.0.1";
- private static int port = 6667;
-
- static {
- options.addOption(new Option(PARAM_INSERTION, "Perform insertion"));
- options.addOption(new Option(PARAM_QUERY, "Perform query"));
- options.addOption(new Option(PARAM_DELETE_SERIES, "Perform deleting timeseries"));
- options.addOption(new Option(PARAM_DELETE_STORAGE_GROUP, "Perform deleting storage group"));
- options.addOption(
- new Option(PARAM_QUERY_PORTS, true, "Ports to query (ip is currently " + "localhost)"));
- options.addOption(new Option(PARAM_INSERT_PORT, true, "Port to perform insertion"));
- options.addOption(new Option(PARAM_BATCH, "Test batch statement"));
- }
-
- private static Map failedQueries;
-
- private static final String[] STORAGE_GROUPS =
- new String[] {
- "root.beijing", "root.shanghai", "root.guangzhou", "root.shenzhen",
- };
-
- private static final String[] DEVICES =
- new String[] {
- "root.beijing.d1", "root.shanghai.d1", "root.guangzhou.d1", "root.shenzhen.d1",
- };
-
- private static final String[] MEASUREMENTS = new String[] {"s1"};
-
- private static final TSDataType[] DATA_TYPES = new TSDataType[] {TSDataType.DOUBLE};
-
- private static List schemas;
-
- private static final String[] DATA_QUERIES =
- new String[] {
- // raw data multi series
- "SELECT * FROM root",
- "SELECT * FROM root WHERE time <= 691200000",
- "SELECT * FROM root WHERE time >= 391200000 and time <= 691200000",
- "SELECT * FROM root.*.* WHERE s1 <= 0.7",
- // raw data single series
- "SELECT s1 FROM root.beijing.d1",
- "SELECT s1 FROM root.shanghai.d1",
- "SELECT s1 FROM root.guangzhou.d1",
- "SELECT s1 FROM root.shenzhen.d1",
- // aggregation
- "SELECT count(s1) FROM root.*.*",
- "SELECT avg(s1) FROM root.*.*",
- "SELECT sum(s1) FROM root.*.*",
- "SELECT max_value(s1) FROM root.*.*",
- "SELECT count(s1) FROM root.*.* where time <= 691200000",
- "SELECT count(s1) FROM root.*.* where s1 <= 0.7",
- // group by device
- "SELECT * FROM root GROUP BY DEVICE",
- // fill
- "SELECT s1 FROM root.beijing.d1 WHERE time = 86400000 FILL (DOUBLE[PREVIOUS,1d])",
- "SELECT s1 FROM root.shanghai.d1 WHERE time = 86400000 FILL (DOUBLE[LINEAR,1d,1d])",
- "SELECT s1 FROM root.guangzhou.d1 WHERE time = 126400000 FILL (DOUBLE[PREVIOUS,1d])",
- "SELECT s1 FROM root.shenzhen.d1 WHERE time = 126400000 FILL (DOUBLE[LINEAR,1d,1d])",
- // group by
- "SELECT COUNT(*) FROM root.*.* GROUP BY ([0, 864000000), 3d, 3d)",
- "SELECT AVG(*) FROM root.*.* WHERE s1 <= 0.7 GROUP BY ([0, 864000000), 3d, 3d)",
- // last
- "SELECT LAST s1 FROM root.*.*",
- };
-
- private static final String[] META_QUERY =
- new String[] {
- "SHOW STORAGE GROUP",
- "SHOW TIMESERIES root",
- "COUNT TIMESERIES root",
- "COUNT TIMESERIES root GROUP BY LEVEL=2",
- "SHOW DEVICES",
- "SHOW TIMESERIES root limit 1 offset 1",
- };
-
- public static void main(String[] args)
- throws TException, StatementExecutionException, IoTDBConnectionException, ParseException,
- SQLException, ClassNotFoundException {
- CommandLineParser parser = new DefaultParser();
- CommandLine commandLine = parser.parse(options, args);
- boolean noOption = args.length == 0;
-
- failedQueries = new HashMap<>();
- prepareSchema();
-
- if (commandLine.hasOption(PARAM_INSERT_PORT)) {
- port = Integer.parseInt(commandLine.getOptionValue(PARAM_INSERT_PORT));
- }
-
- doInsertion(noOption, commandLine);
-
- doQuery(noOption, commandLine);
-
- doDeleteSeries(noOption, commandLine);
-
- doDeleteSG(noOption, commandLine);
-
- doBatchStmt(noOption, commandLine);
- }
-
- private static void doInsertion(boolean noOption, CommandLine commandLine) throws TException {
- if (noOption || commandLine.hasOption(PARAM_INSERTION)) {
- System.out.println("Test insertion");
- Client client = getClient(ip, port);
- long sessionId = connectClient(client);
- testInsertion(client, sessionId);
- client.closeSession(new TSCloseSessionReq(sessionId));
- }
- }
-
- private static void doQuery(boolean noOption, CommandLine commandLine)
- throws StatementExecutionException, TException, IoTDBConnectionException {
- if (noOption || commandLine.hasOption(PARAM_QUERY)) {
- int[] queryPorts = null;
- if (commandLine.hasOption(PARAM_QUERY_PORTS)) {
- queryPorts = parseIntArray(commandLine.getOptionValue(PARAM_QUERY_PORTS));
- }
- if (queryPorts == null) {
- queryPorts = new int[] {port, port + 1, port + 2};
- }
- for (int queryPort : queryPorts) {
- System.out.println("Test port: " + queryPort);
-
- Client client = getClient(ip, queryPort);
- long sessionId = connectClient(client);
- System.out.println("Test data queries");
- testQuery(client, sessionId, DATA_QUERIES);
-
- System.out.println("Test metadata queries");
- testQuery(client, sessionId, META_QUERY);
-
- logger.info("Failed queries: {}", failedQueries);
- client.closeSession(new TSCloseSessionReq(sessionId));
- }
- }
- }
-
- private static void doDeleteSeries(boolean noOption, CommandLine commandLine) throws TException {
- if (noOption || commandLine.hasOption(PARAM_DELETE_SERIES)) {
- System.out.println("Test delete timeseries");
- Client client = getClient(ip, port);
- long sessionId = connectClient(client);
- testDeleteTimeseries(client, sessionId);
- client.closeSession(new TSCloseSessionReq(sessionId));
- }
- }
-
- private static void doDeleteSG(boolean noOption, CommandLine commandLine)
- throws StatementExecutionException, TException, IoTDBConnectionException {
- if (noOption || commandLine.hasOption(PARAM_DELETE_STORAGE_GROUP)) {
- System.out.println("Test delete storage group");
- Client client = getClient(ip, port);
- long sessionId = connectClient(client);
- testDeleteStorageGroup(client, sessionId);
- client.closeSession(new TSCloseSessionReq(sessionId));
- }
- }
-
- private static void doBatchStmt(boolean noOption, CommandLine commandLine)
- throws SQLException, ClassNotFoundException {
- if (noOption || commandLine.hasOption(PARAM_BATCH)) {
- System.out.println("Test batch create sgs");
- testBatch(ip, port);
- }
- }
-
- private static int[] parseIntArray(String str) {
- if (str == null) {
- return new int[0];
- }
- String[] split = str.split(",");
- int[] ret = new int[split.length];
- for (int i = 0; i < split.length; i++) {
- ret[i] = Integer.parseInt(split[i]);
- }
- return ret;
- }
-
- private static long connectClient(Client client) throws TException {
- TSOpenSessionReq openReq =
- new TSOpenSessionReq(
- TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3, ZoneId.systemDefault().getId());
- openReq.setUsername("root");
- openReq.setPassword("root");
- TSOpenSessionResp openResp = client.openSession(openReq);
- return openResp.getSessionId();
- }
-
- @SuppressWarnings({"java:S2095"}) // the transport is used later
- private static Client getClient(String ip, int port) throws TTransportException {
- TSIService.Client.Factory factory = new Factory();
- TTransport transport = RpcTransportFactory.INSTANCE.getTransportWithNoTimeout(ip, port);
- transport.open();
- TProtocol protocol =
- IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable()
- ? new TCompactProtocol(transport)
- : new TBinaryProtocol(transport);
- return factory.getClient(protocol);
- }
-
- private static void prepareSchema() {
- schemas = new ArrayList<>();
- for (String device : DEVICES) {
- for (int i = 0; i < MEASUREMENTS.length; i++) {
- String measurement = MEASUREMENTS[i];
- schemas.add(
- new MeasurementSchema(
- device + IoTDBConstant.PATH_SEPARATOR + measurement, DATA_TYPES[i]));
- }
- }
- }
-
- private static void testQuery(Client client, long sessionId, String[] queries)
- throws TException, StatementExecutionException, IoTDBConnectionException {
- long statementId = client.requestStatementId(sessionId);
- for (String dataQuery : queries) {
- executeQuery(client, sessionId, dataQuery, statementId);
- }
-
- TSCloseOperationReq tsCloseOperationReq = new TSCloseOperationReq(sessionId);
- tsCloseOperationReq.setStatementId(statementId);
- client.closeOperation(tsCloseOperationReq);
- }
-
- private static void executeQuery(Client client, long sessionId, String query, long statementId)
- throws TException, StatementExecutionException, IoTDBConnectionException {
- if (logger.isInfoEnabled()) {
- logger.info("{ {} }", query);
- }
- TSExecuteStatementResp resp =
- client.executeQueryStatement(
- new TSExecuteStatementReq(sessionId, query, statementId).setFetchSize(1000));
- if (resp.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
- failedQueries.put(query, resp.status);
- return;
- }
-
- long queryId = resp.getQueryId();
- if (logger.isInfoEnabled()) {
- logger.info(resp.columns.toString());
- }
-
- SessionDataSet dataSet =
- new SessionDataSet(
- query,
- resp.getColumns(),
- resp.getDataTypeList(),
- resp.columnNameIndexMap,
- queryId,
- statementId,
- client,
- sessionId,
- resp.queryDataSet,
- false);
-
- while (dataSet.hasNext()) {
- if (logger.isInfoEnabled()) {
- logger.info(dataSet.next().toString());
- }
- }
- System.out.println();
-
- TSCloseOperationReq tsCloseOperationReq = new TSCloseOperationReq(sessionId);
- tsCloseOperationReq.setQueryId(queryId);
- client.closeOperation(tsCloseOperationReq);
- }
-
- private static void testDeleteStorageGroup(Client client, long sessionId)
- throws TException, StatementExecutionException, IoTDBConnectionException {
- if (logger.isInfoEnabled()) {
- logger.info(client.deleteStorageGroups(sessionId, Arrays.asList(STORAGE_GROUPS)).toString());
- }
-
- testQuery(client, sessionId, new String[] {"SELECT * FROM root"});
- }
-
- private static void registerTimeseries(long sessionId, Client client) throws TException {
- TSCreateTimeseriesReq req = new TSCreateTimeseriesReq();
- req.setSessionId(sessionId);
- for (IMeasurementSchema schema : schemas) {
- req.setDataType(schema.getType().ordinal());
- req.setEncoding(schema.getEncodingType().ordinal());
- req.setCompressor(schema.getCompressor().ordinal());
- req.setPath(schema.getMeasurementId());
- if (logger.isInfoEnabled()) {
- logger.info(client.createTimeseries(req).toString());
- }
- }
- }
-
- @SuppressWarnings("ConstantConditions")
- private static void testInsertion(Client client, long sessionId) throws TException {
- for (String storageGroup : STORAGE_GROUPS) {
- if (logger.isInfoEnabled()) {
- logger.info(client.setStorageGroup(sessionId, storageGroup).toString());
- }
- }
-
- registerTimeseries(sessionId, client);
-
- TSInsertStringRecordReq insertReq = new TSInsertStringRecordReq();
- insertReq.setMeasurements(Arrays.asList(MEASUREMENTS));
- insertReq.setSessionId(sessionId);
-
- for (int i = 0; i < 10; i++) {
- List values = new ArrayList<>(MEASUREMENTS.length);
- insertReq.setTimestamp(i * 24 * 3600 * 1000L);
- for (int i1 = 0; i1 < MEASUREMENTS.length; i1++) {
- switch (DATA_TYPES[i1]) {
- case DOUBLE:
- values.add(Double.toString(i * 0.1));
- break;
- case BOOLEAN:
- values.add(Boolean.toString(i % 2 == 0));
- break;
- case INT64:
- values.add(Long.toString(i));
- break;
- case INT32:
- values.add(Integer.toString(i));
- break;
- case FLOAT:
- values.add(Float.toString(i * 0.1f));
- break;
- case TEXT:
- values.add("S" + i);
- break;
- }
- }
-
- insertReq.setValues(values);
-
- for (String device : DEVICES) {
- insertReq.setPrefixPath(device);
- if (logger.isInfoEnabled()) {
- logger.info(insertReq.toString());
- logger.info(client.insertStringRecord(insertReq).toString());
- }
- }
- }
- }
-
- private static void testDeleteTimeseries(Client client, long sessionId) throws TException {
- List paths = new ArrayList<>();
- for (String measurement : MEASUREMENTS) {
- for (String device : DEVICES) {
- paths.add(device + "." + measurement);
- }
- }
- if (logger.isInfoEnabled()) {
- logger.info(client.deleteTimeseries(sessionId, paths).toString());
- }
- }
-
- private static void testBatch(String ip, int port) throws ClassNotFoundException, SQLException {
- Class.forName(Config.JDBC_DRIVER_NAME);
- try (Connection connection =
- DriverManager.getConnection(
- Config.IOTDB_URL_PREFIX + String.format("%s:%d/", ip, port), "root", "root");
- Statement statement = connection.createStatement()) {
-
- statement.addBatch("SET STORAGE GROUP TO root.batch1");
- statement.addBatch("SET STORAGE GROUP TO root.batch2");
- statement.addBatch("SET STORAGE GROUP TO root.batch3");
- statement.addBatch("SET STORAGE GROUP TO root.batch4");
-
- statement.executeBatch();
- statement.clearBatch();
-
- try (ResultSet set = statement.executeQuery("SHOW STORAGE GROUP")) {
- int colNum = set.getMetaData().getColumnCount();
- while (set.next()) {
- StringBuilder stringBuilder = new StringBuilder();
- for (int i = 0; i < colNum; i++) {
- stringBuilder.append(set.getString(i + 1)).append(",");
- }
- System.out.println(stringBuilder);
- }
- }
- }
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java
deleted file mode 100644
index c642632206c0..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster;
-
-import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy;
-import org.apache.iotdb.db.engine.storagegroup.DataRegion;
-import org.apache.iotdb.db.engine.storagegroup.TsFileProcessor;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-public class ClusterFileFlushPolicy implements TsFileFlushPolicy {
-
- private static final Logger logger = LoggerFactory.getLogger(ClusterFileFlushPolicy.class);
-
- private ExecutorService closePartitionExecutor;
- private MetaGroupMember metaGroupMember;
-
- public ClusterFileFlushPolicy(MetaGroupMember metaGroupMember) {
- this.metaGroupMember = metaGroupMember;
- this.closePartitionExecutor =
- new ThreadPoolExecutor(
- 16,
- 1024,
- 0,
- TimeUnit.SECONDS,
- new LinkedBlockingDeque<>(),
- r -> {
- Thread thread = new Thread(r);
- thread.setName("ClusterFileFlushPolicy-" + thread.getId());
- return thread;
- });
- }
-
- @Override
- public void apply(DataRegion dataRegion, TsFileProcessor processor, boolean isSeq) {
- logger.info(
- "The memtable size reaches the threshold, async flush it to tsfile: {}",
- processor.getTsFileResource().getTsFile().getAbsolutePath());
-
- if (processor.shouldClose()) {
- // find the related DataGroupMember and close the processor through it
- // we execute it in another thread to avoid deadlocks
- closePartitionExecutor.submit(
- () ->
- metaGroupMember.closePartition(
- dataRegion.getDataRegionId(), processor.getTimeRangeId(), isSeq));
- }
- // flush the memtable anyway to avoid the insertion trigger the policy again
- processor.asyncFlush();
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDB.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDB.java
deleted file mode 100644
index 2910a3fd473f..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDB.java
+++ /dev/null
@@ -1,672 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster;
-
-import org.apache.iotdb.cluster.client.ClientCategory;
-import org.apache.iotdb.cluster.client.ClientManager;
-import org.apache.iotdb.cluster.client.IClientManager;
-import org.apache.iotdb.cluster.client.async.AsyncDataClient;
-import org.apache.iotdb.cluster.client.async.AsyncMetaClient;
-import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor;
-import org.apache.iotdb.cluster.client.sync.SyncDataClient;
-import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterConstant;
-import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.cluster.coordinator.Coordinator;
-import org.apache.iotdb.cluster.exception.ConfigInconsistentException;
-import org.apache.iotdb.cluster.exception.StartUpCheckFailureException;
-import org.apache.iotdb.cluster.metadata.CSchemaProcessor;
-import org.apache.iotdb.cluster.metadata.MetaPuller;
-import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable;
-import org.apache.iotdb.cluster.partition.slot.SlotStrategy;
-import org.apache.iotdb.cluster.query.manage.ClusterSessionManager;
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.server.ClusterRPCService;
-import org.apache.iotdb.cluster.server.ClusterTSServiceImpl;
-import org.apache.iotdb.cluster.server.HardLinkCleaner;
-import org.apache.iotdb.cluster.server.Response;
-import org.apache.iotdb.cluster.server.basic.ClusterServiceProvider;
-import org.apache.iotdb.cluster.server.clusterinfo.ClusterInfoServer;
-import org.apache.iotdb.cluster.server.member.MetaGroupMember;
-import org.apache.iotdb.cluster.server.monitor.NodeReport;
-import org.apache.iotdb.cluster.server.raft.DataRaftHeartBeatService;
-import org.apache.iotdb.cluster.server.raft.DataRaftService;
-import org.apache.iotdb.cluster.server.raft.MetaRaftHeartBeatService;
-import org.apache.iotdb.cluster.server.raft.MetaRaftService;
-import org.apache.iotdb.cluster.server.service.DataGroupEngine;
-import org.apache.iotdb.cluster.server.service.DataGroupServiceImpls;
-import org.apache.iotdb.cluster.server.service.MetaAsyncService;
-import org.apache.iotdb.cluster.server.service.MetaSyncService;
-import org.apache.iotdb.cluster.utils.ClusterUtils;
-import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitor;
-import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory;
-import org.apache.iotdb.commons.conf.IoTDBConstant;
-import org.apache.iotdb.commons.exception.ConfigurationException;
-import org.apache.iotdb.commons.exception.StartupException;
-import org.apache.iotdb.commons.service.JMXService;
-import org.apache.iotdb.commons.service.RegisterManager;
-import org.apache.iotdb.commons.service.ThriftServiceThread;
-import org.apache.iotdb.commons.utils.TestOnly;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.conf.IoTDBStartCheck;
-import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.service.IoTDB;
-import org.apache.iotdb.db.service.basic.ServiceProvider;
-
-import org.apache.thrift.TException;
-import org.apache.thrift.async.TAsyncClientManager;
-import org.apache.thrift.protocol.TBinaryProtocol.Factory;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.iotdb.cluster.config.ClusterConstant.THREAD_POLL_WAIT_TERMINATION_TIME_S;
-import static org.apache.iotdb.cluster.utils.ClusterUtils.UNKNOWN_CLIENT_IP;
-
-/** we do not inherent IoTDB instance, as it may break the singleton mode of IoTDB. */
-public class ClusterIoTDB implements ClusterIoTDBMBean {
-
- private static final Logger logger = LoggerFactory.getLogger(ClusterIoTDB.class);
- private final String mbeanName =
- String.format(
- "%s:%s=%s", "org.apache.iotdb.cluster.service", IoTDBConstant.JMX_TYPE, "ClusterIoTDB");
-
- // TODO: better to throw exception if the client can not be get. Then we can remove this field.
- private boolean printClientConnectionErrorStack = false;
-
- private MetaGroupMember metaGroupMember;
-
- private DataGroupEngine dataGroupEngine;
-
- private Node thisNode;
- private Coordinator coordinator;
-
- private final IoTDB iotdb = IoTDB.getInstance();
-
- // Cluster IoTDB uses a individual registerManager with its parent.
- private final RegisterManager registerManager = new RegisterManager();
-
- /**
- * a single thread pool, every "REPORT_INTERVAL_SEC" seconds, "reportThread" will print the status
- * of all raft members in this node.
- */
- private ScheduledExecutorService reportThread;
-
- private boolean allowReport = true;
-
- /** hardLinkCleaner will periodically clean expired hardlinks created during snapshots. */
- private ScheduledExecutorService hardLinkCleanerThread;
-
- /**
- * The clientManager is only used by those instances who do not belong to any DataGroup or
- * MetaGroup.
- */
- private IClientManager clientManager;
-
- private ClusterIoTDB() {
- // we do not init anything here, so that we can re-initialize the instance in IT.
- }
-
- /** initialize the current node and its services */
- public boolean initLocalEngines() {
- ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
- thisNode = new Node();
- // set internal rpc ip and ports
- thisNode.setInternalIp(config.getInternalIp());
- thisNode.setMetaPort(config.getInternalMetaPort());
- thisNode.setDataPort(config.getInternalDataPort());
- // set client rpc ip and ports
- thisNode.setClientPort(config.getClusterRpcPort());
- thisNode.setClientIp(IoTDBDescriptor.getInstance().getConfig().getRpcAddress());
- coordinator = new Coordinator();
- // local engine
- TProtocolFactory protocolFactory =
- ThriftServiceThread.getProtocolFactory(
- IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable());
- metaGroupMember = new MetaGroupMember(protocolFactory, thisNode, coordinator);
- IoTDB.setClusterMode();
- IoTDB.setSchemaProcessor(CSchemaProcessor.getInstance());
- ((CSchemaProcessor) IoTDB.schemaProcessor).setMetaGroupMember(metaGroupMember);
- ((CSchemaProcessor) IoTDB.schemaProcessor).setCoordinator(coordinator);
- MetaPuller.getInstance().init(metaGroupMember);
- // set coordinator for serviceProvider construction
- try {
- IoTDB.setServiceProvider(new ClusterServiceProvider(coordinator, metaGroupMember));
- } catch (QueryProcessException e) {
- logger.error("Failed to set clusterServiceProvider.", e);
- stop();
- return false;
- }
-
- // from the scope of the DataGroupEngine,it should be singleton pattern
- // the way of setting MetaGroupMember in DataGroupEngine may need a better modification in
- // future commit.
- DataGroupEngine.setProtocolFactory(protocolFactory);
- DataGroupEngine.setMetaGroupMember(metaGroupMember);
- dataGroupEngine = DataGroupEngine.getInstance();
- clientManager =
- new ClientManager(
- ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(),
- ClientManager.Type.RequestForwardClient);
- initTasks();
- try {
- // we need to check config after initLocalEngines.
- startServerCheck();
- JMXService.registerMBean(metaGroupMember, metaGroupMember.getMBeanName());
- } catch (StartupException e) {
- logger.error("Failed to check cluster config.", e);
- stop();
- return false;
- }
- return true;
- }
-
- private void initTasks() {
- reportThread = IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("NodeReportThread");
- reportThread.scheduleAtFixedRate(
- this::generateNodeReport,
- ClusterConstant.REPORT_INTERVAL_SEC,
- ClusterConstant.REPORT_INTERVAL_SEC,
- TimeUnit.SECONDS);
- hardLinkCleanerThread =
- IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("HardLinkCleaner");
- hardLinkCleanerThread.scheduleAtFixedRate(
- new HardLinkCleaner(),
- ClusterConstant.CLEAN_HARDLINK_INTERVAL_SEC,
- ClusterConstant.CLEAN_HARDLINK_INTERVAL_SEC,
- TimeUnit.SECONDS);
- }
-
- /**
- * Generate a report containing the status of both MetaGroupMember and DataGroupMembers of this
- * node. This will help to see if the node is in a consistent and right state during debugging.
- */
- private void generateNodeReport() {
- if (logger.isDebugEnabled() && allowReport) {
- try {
- NodeReport report = new NodeReport(thisNode);
- report.setMetaMemberReport(metaGroupMember.genMemberReport());
- report.setDataMemberReportList(dataGroupEngine.genMemberReports());
- logger.debug(report.toString());
- } catch (Exception e) {
- logger.error("exception occurred when generating node report", e);
- }
- }
- }
-
- public static void main(String[] args) {
- new ClusterIoTDBServerCommandLine().doMain(args);
- }
-
- protected boolean serverCheckAndInit() throws ConfigurationException, IOException {
- IoTDBStartCheck.getInstance().checkConfig();
- IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
- // init server's configuration first, because the cluster configuration may read settings from
- // the server's configuration.
- // auto create schema is took over by cluster module, so we disable it in the server module.
- config.setAutoCreateSchemaEnabled(false);
- // check cluster config
- String checkResult = clusterConfigCheck();
- if (checkResult != null) {
- logger.error(checkResult);
- return false;
- }
- ClusterConfig clusterConfig = ClusterDescriptor.getInstance().getConfig();
- // if client ip is the default address, set it same with internal ip
- if (config.getRpcAddress().equals("0.0.0.0")) {
- config.setRpcAddress(clusterConfig.getInternalIp());
- }
- // set the memory allocated for raft log of each raft log manager
- if (clusterConfig.getReplicationNum() > 1) {
- clusterConfig.setMaxMemorySizeForRaftLog(
- (long)
- (config.getAllocateMemoryForWrite()
- * clusterConfig.getRaftLogMemoryProportion()
- / clusterConfig.getReplicationNum()));
- // calculate remaining memory allocated for write process
- config.setAllocateMemoryForWrite(
- (long)
- (config.getAllocateMemoryForWrite()
- * (1 - clusterConfig.getRaftLogMemoryProportion())));
- }
- return true;
- }
-
- private String clusterConfigCheck() {
- try {
- ClusterDescriptor.getInstance().replaceHostnameWithIp();
- } catch (Exception e) {
- return String.format("replace hostname with ip failed, %s", e.getMessage());
- }
- ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
- // check the initial replicateNum and refuse to start when the replicateNum <= 0
- if (config.getReplicationNum() <= 0) {
- return String.format(
- "ReplicateNum should be greater than 0 instead of %d.", config.getReplicationNum());
- }
- // check the initial cluster size and refuse to start when the size < quorum
- int quorum = config.getReplicationNum() / 2 + 1;
- if (config.getSeedNodeUrls().size() < quorum) {
- return String.format(
- "Seed number less than quorum, seed number: %s, quorum: " + "%s.",
- config.getSeedNodeUrls().size(), quorum);
- }
- // TODO: duplicate code
- Set seedNodes = new HashSet<>();
- for (String url : config.getSeedNodeUrls()) {
- Node node = ClusterUtils.parseNode(url);
- if (seedNodes.contains(node)) {
- return String.format(
- "SeedNodes must not repeat each other. SeedNodes: %s", config.getSeedNodeUrls());
- }
- seedNodes.add(node);
- }
- return null;
- }
-
- /** Start as a seed node */
- public void activeStartNodeMode() {
- try {
- // start iotdb server first
- IoTDB.getInstance().active();
- // some work about cluster
- preInitCluster();
- // try to build cluster
- metaGroupMember.buildCluster();
- // register service after cluster build
- postInitCluster();
- // init ServiceImpl to handle request of client
- startClientRPC();
- } catch (StartupException
- | StartUpCheckFailureException
- | ConfigInconsistentException
- | QueryProcessException e) {
- logger.error("Fail to start server", e);
- stop();
- }
- }
-
- private void preInitCluster() throws StartupException {
- stopRaftInfoReport();
- JMXService.registerMBean(this, mbeanName);
- // register MetaGroupMember. MetaGroupMember has the same position with "StorageEngine" in the
- // cluster module.
- // TODO: it is better to remove coordinator out of metaGroupEngine
-
- registerManager.register(metaGroupMember);
- registerManager.register(dataGroupEngine);
-
- // rpc service initialize
- DataGroupServiceImpls dataGroupServiceImpls = new DataGroupServiceImpls();
- if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) {
- MetaAsyncService metaAsyncService = new MetaAsyncService(metaGroupMember);
- MetaRaftHeartBeatService.getInstance().initAsyncedServiceImpl(metaAsyncService);
- MetaRaftService.getInstance().initAsyncedServiceImpl(metaAsyncService);
- DataRaftService.getInstance().initAsyncedServiceImpl(dataGroupServiceImpls);
- DataRaftHeartBeatService.getInstance().initAsyncedServiceImpl(dataGroupServiceImpls);
- } else {
- MetaSyncService syncService = new MetaSyncService(metaGroupMember);
- MetaRaftHeartBeatService.getInstance().initSyncedServiceImpl(syncService);
- MetaRaftService.getInstance().initSyncedServiceImpl(syncService);
- DataRaftService.getInstance().initSyncedServiceImpl(dataGroupServiceImpls);
- DataRaftHeartBeatService.getInstance().initSyncedServiceImpl(dataGroupServiceImpls);
- }
- // start RPC service
- logger.info("start Meta Heartbeat RPC service... ");
- registerManager.register(MetaRaftHeartBeatService.getInstance());
- /* TODO: better to start the Meta RPC service until the heartbeatService has elected the leader
- and quorum of followers have caught up. */
- logger.info("start Meta RPC service... ");
- registerManager.register(MetaRaftService.getInstance());
- }
-
- private void postInitCluster() throws StartupException {
- logger.info("start Data Heartbeat RPC service... ");
- registerManager.register(DataRaftHeartBeatService.getInstance());
- logger.info("start Data RPC service... ");
- registerManager.register(DataRaftService.getInstance());
- // RPC based DBA API
- registerManager.register(ClusterInfoServer.getInstance());
- // JMX based DBA API
- registerManager.register(ClusterMonitor.INSTANCE);
- }
-
- private void startClientRPC() throws QueryProcessException, StartupException {
- // we must wait until the metaGroup established.
- // So that the ClusterRPCService can work.
- ClusterTSServiceImpl clusterServiceImpl = new ClusterTSServiceImpl();
- ServiceProvider.SESSION_MANAGER = ClusterSessionManager.getInstance();
- ClusterSessionManager.getInstance().setCoordinator(coordinator);
- ClusterRPCService.getInstance().initSyncedServiceImpl(clusterServiceImpl);
- registerManager.register(ClusterRPCService.getInstance());
- // init influxDB MManager
- if (IoTDBDescriptor.getInstance().getConfig().isEnableInfluxDBRpcService()) {
- IoTDB.initInfluxDBMManager();
- }
- }
-
- /** Be added to the cluster by seed nodes */
- public void activeAddNodeMode() {
- try {
- final long startTime = System.currentTimeMillis();
- preInitCluster();
- metaGroupMember.joinCluster();
- postInitCluster();
- dataGroupEngine.pullSnapshots();
- startClientRPC();
- logger.info(
- "Adding this node {} to cluster costs {} ms",
- thisNode,
- (System.currentTimeMillis() - startTime));
- } catch (StartupException
- | QueryProcessException
- | StartUpCheckFailureException
- | ConfigInconsistentException e) {
- stop();
- logger.error("Fail to join cluster", e);
- }
- }
-
- private void startServerCheck() throws StartupException {
- ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
- // assert not duplicated nodes
- Set seedNodes = new HashSet<>();
- for (String url : config.getSeedNodeUrls()) {
- Node node = ClusterUtils.parseNode(url);
- if (seedNodes.contains(node)) {
- String message =
- String.format(
- "SeedNodes must not repeat each other. SeedNodes: %s", config.getSeedNodeUrls());
- throw new StartupException(metaGroupMember.getName(), message);
- }
- seedNodes.add(node);
- }
-
- // assert this node is in all nodes when restart
- if (!metaGroupMember.getAllNodes().isEmpty()) {
- if (!metaGroupMember.getAllNodes().contains(metaGroupMember.getThisNode())) {
- String message =
- String.format(
- "All nodes in partitionTables must contains local node in start-server mode. "
- + "LocalNode: %s, AllNodes: %s",
- metaGroupMember.getThisNode(), metaGroupMember.getAllNodes());
- throw new StartupException(metaGroupMember.getName(), message);
- } else {
- return;
- }
- }
-
- // assert this node is in seed nodes list
- if (!seedNodes.contains(thisNode)) {
- String message =
- String.format(
- "SeedNodes must contains local node in start-server mode. LocalNode: %s ,SeedNodes: "
- + "%s",
- thisNode.toString(), config.getSeedNodeUrls());
- throw new StartupException(metaGroupMember.getName(), message);
- }
- }
-
- protected void doRemoveNode(String[] args) throws IOException {
- if (args.length != 3) {
- logger.error("Usage: ");
- return;
- }
- String ip = args[1];
- int metaPort = Integer.parseInt(args[2]);
- ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
- TProtocolFactory factory =
- config.isRpcThriftCompressionEnabled() ? new TCompactProtocol.Factory() : new Factory();
- Node nodeToRemove = new Node();
- nodeToRemove.setInternalIp(ip).setMetaPort(metaPort).setClientIp(UNKNOWN_CLIENT_IP);
- // try sending the request to each seed node
- for (String url : config.getSeedNodeUrls()) {
- Node node = ClusterUtils.parseNode(url);
- if (node == null) {
- continue;
- }
- AsyncMetaClient client =
- new AsyncMetaClient(factory, new TAsyncClientManager(), node, ClientCategory.META);
- Long response = null;
- long startTime = System.currentTimeMillis();
- try {
- logger.info("Start removing node {} with the help of node {}", nodeToRemove, node);
- response = SyncClientAdaptor.removeNode(client, nodeToRemove);
- } catch (TException e) {
- logger.warn("Cannot send remove node request through {}, try next node", node);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- logger.warn("Cannot send remove node request through {}, try next node", node);
- }
- if (response != null) {
- handleNodeRemovalResp(response, nodeToRemove, startTime);
- return;
- }
- }
- }
-
- private void handleNodeRemovalResp(Long response, Node nodeToRemove, long startTime) {
- if (response == Response.RESPONSE_AGREE) {
- logger.info(
- "Node {} is successfully removed, cost {}ms",
- nodeToRemove,
- (System.currentTimeMillis() - startTime));
- } else if (response == Response.RESPONSE_CLUSTER_TOO_SMALL) {
- logger.error("Cluster size is too small, cannot remove any node");
- } else if (response == Response.RESPONSE_REJECT) {
- logger.error("Node {} is not found in the cluster, please check", nodeToRemove);
- } else if (response == Response.RESPONSE_DATA_MIGRATION_NOT_FINISH) {
- logger.warn(
- "The data migration of the previous membership change operation is not finished. Please "
- + "try again later");
- } else {
- logger.error("Unexpected response {}", response);
- }
- }
-
- /** Developers may perform pre-start customizations here for debugging or experiments. */
- @SuppressWarnings("java:S125") // leaving examples
- private void preStartCustomize() {
- // customize data distribution
- // The given example tries to divide storage groups like "root.sg_1", "root.sg_2"... into k
- // nodes evenly, and use default strategy for other groups
- SlotPartitionTable.setSlotStrategy(
- new SlotStrategy() {
- final SlotStrategy defaultStrategy = new SlotStrategy.DefaultStrategy();
- final int clusterSize =
- ClusterDescriptor.getInstance().getConfig().getSeedNodeUrls().size();
-
- @Override
- public int calculateSlotByTime(String storageGroupName, long timestamp, int maxSlotNum) {
- int sgSerialNum = extractSerialNumInSGName(storageGroupName) % clusterSize;
- if (sgSerialNum >= 0) {
- return maxSlotNum / clusterSize * sgSerialNum;
- } else {
- return defaultStrategy.calculateSlotByTime(storageGroupName, timestamp, maxSlotNum);
- }
- }
-
- @Override
- public int calculateSlotByPartitionNum(
- String storageGroupName, long partitionId, int maxSlotNum) {
- int sgSerialNum = extractSerialNumInSGName(storageGroupName) % clusterSize;
- if (sgSerialNum >= 0) {
- return maxSlotNum / clusterSize * sgSerialNum;
- } else {
- return defaultStrategy.calculateSlotByPartitionNum(
- storageGroupName, partitionId, maxSlotNum);
- }
- }
-
- private int extractSerialNumInSGName(String storageGroupName) {
- String[] s = storageGroupName.split("_");
- if (s.length != 2) {
- return -1;
- }
- try {
- return Integer.parseInt(s[1]);
- } catch (NumberFormatException e) {
- return -1;
- }
- }
- });
- }
-
- public void stop() {
- deactivate();
- }
-
- private void deactivate() {
- logger.info("Deactivating Cluster IoTDB...");
- stopThreadPools();
- registerManager.deregisterAll();
- JMXService.deregisterMBean(mbeanName);
- logger.info("ClusterIoTDB is deactivated.");
- // stop the iotdb kernel
- iotdb.stop();
- }
-
- private void stopThreadPools() {
- stopThreadPool(reportThread, "reportThread");
- stopThreadPool(hardLinkCleanerThread, "hardLinkCleanerThread");
- }
-
- private void stopThreadPool(ExecutorService pool, String name) {
- if (pool != null) {
- pool.shutdownNow();
- try {
- pool.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- logger.error("Unexpected interruption when waiting for {} to end", name, e);
- }
- }
- }
-
- @TestOnly
- public void setClientManager(IClientManager clientManager) {
- this.clientManager = clientManager;
- }
-
- @TestOnly
- public IClientManager getClientManager() {
- return this.clientManager;
- }
-
- @TestOnly
- public void setDataGroupEngine(DataGroupEngine dataGroupEngine) {
- this.dataGroupEngine = dataGroupEngine;
- }
-
- public MetaGroupMember getMetaGroupMember() {
- return metaGroupMember;
- }
-
- public Node getThisNode() {
- return thisNode;
- }
-
- public Coordinator getCoordinator() {
- return coordinator;
- }
-
- public IoTDB getIotdb() {
- return iotdb;
- }
-
- public RegisterManager getRegisterManager() {
- return registerManager;
- }
-
- public DataGroupEngine getDataGroupEngine() {
- return dataGroupEngine;
- }
-
- public void setMetaGroupMember(MetaGroupMember metaGroupMember) {
- this.metaGroupMember = metaGroupMember;
- }
-
- public static ClusterIoTDB getInstance() {
- return ClusterIoTDBHolder.INSTANCE;
- }
-
- @Override
- public boolean startRaftInfoReport() {
- logger.info("Raft status report is enabled.");
- allowReport = true;
- return logger.isDebugEnabled();
- }
-
- @Override
- public void stopRaftInfoReport() {
- logger.info("Raft status report is disabled.");
- allowReport = false;
- }
-
- @Override
- public void enablePrintClientConnectionErrorStack() {
- printClientConnectionErrorStack = true;
- }
-
- @Override
- public void disablePrintClientConnectionErrorStack() {
- printClientConnectionErrorStack = false;
- }
-
- public boolean shouldPrintClientConnectionErrorStack() {
- return printClientConnectionErrorStack;
- }
-
- public SyncDataClient getSyncDataClient(Node node, int readOperationTimeoutMS)
- throws IOException {
- SyncDataClient dataClient =
- (SyncDataClient) clientManager.borrowSyncClient(node, ClientCategory.DATA);
- if (dataClient != null) {
- dataClient.setTimeout(readOperationTimeoutMS);
- }
- return dataClient;
- }
-
- public AsyncDataClient getAsyncDataClient(Node node, int readOperationTimeoutMS)
- throws IOException {
- AsyncDataClient dataClient =
- (AsyncDataClient) clientManager.borrowAsyncClient(node, ClientCategory.DATA);
- if (dataClient != null) {
- dataClient.setTimeout(readOperationTimeoutMS);
- }
- return dataClient;
- }
-
- private static class ClusterIoTDBHolder {
-
- private static final ClusterIoTDB INSTANCE = new ClusterIoTDB();
-
- private ClusterIoTDBHolder() {}
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBMBean.java
deleted file mode 100644
index 7fedf0f3d90a..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBMBean.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster;
-
-// we do not inherent IoTDB instance, as it may break the singleton mode of IoTDB.
-public interface ClusterIoTDBMBean {
- /**
- * try to enable the raft info report.
- *
- * @return true only if the log level is lower than debug and the report is enabled.
- */
- boolean startRaftInfoReport();
-
- void stopRaftInfoReport();
-
- void enablePrintClientConnectionErrorStack();
-
- void disablePrintClientConnectionErrorStack();
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBServerCommandLine.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBServerCommandLine.java
deleted file mode 100644
index 220fbb643d2b..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBServerCommandLine.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.cluster;
-
-import org.apache.iotdb.commons.ServerCommandLine;
-import org.apache.iotdb.commons.exception.ConfigurationException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-public class ClusterIoTDBServerCommandLine extends ServerCommandLine {
- private static final Logger logger = LoggerFactory.getLogger(ClusterIoTDBServerCommandLine.class);
-
- // establish the cluster as a seed
- private static final String MODE_START = "-s";
- // join an established cluster
- private static final String MODE_ADD = "-a";
- // send a request to remove a node, more arguments: ip-of-removed-node
- // metaport-of-removed-node
- private static final String MODE_REMOVE = "-r";
-
- private static final String USAGE =
- "Usage: <-s|-a|-r> "
- + "[-D{} ] \n"
- + "-s: start the node as a seed\n"
- + "-a: start the node as a new node\n"
- + "-r: remove the node out of the cluster\n";
-
- @Override
- protected String getUsage() {
- return USAGE;
- }
-
- @Override
- protected int run(String[] args) {
- if (args.length < 1) {
- usage(null);
- return -1;
- }
-
- ClusterIoTDB cluster = ClusterIoTDB.getInstance();
- // check config of iotdb,and set some configs in cluster mode
- try {
- if (!cluster.serverCheckAndInit()) {
- return -1;
- }
- } catch (ConfigurationException | IOException e) {
- logger.error("meet error when doing start checking", e);
- return -1;
- }
- String mode = args[0];
- logger.info("Running mode {}", mode);
-
- // initialize the current node and its services
- if (!cluster.initLocalEngines()) {
- logger.error("initLocalEngines error, stop process!");
- return -1;
- }
-
- // we start IoTDB kernel first. then we start the cluster module.
- if (MODE_START.equals(mode)) {
- cluster.activeStartNodeMode();
- } else if (MODE_ADD.equals(mode)) {
- cluster.activeAddNodeMode();
- } else if (MODE_REMOVE.equals(mode)) {
- try {
- cluster.doRemoveNode(args);
- } catch (IOException e) {
- logger.error("Fail to remove node in cluster", e);
- }
- } else {
- logger.error("Unrecognized mode {}", mode);
- }
- return 0;
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java b/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
deleted file mode 100644
index e8878880d08e..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster;
-
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.utils.NodeSerializeUtils;
-import org.apache.iotdb.commons.utils.SerializeUtils;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Objects;
-
-public class RemoteTsFileResource extends TsFileResource {
-
- private Node source;
- private boolean isRemote = false;
- private boolean withModification = false;
-
- /**
- * Whether the plan range ([minPlanIndex, maxPlanIndex]) overlaps with another TsFile in the same
- * time partition. If not (unique = true), we shall have confidence that the file has all data
- * whose plan indexes are within [minPlanIndex, maxPlanIndex], so we can remove other local files
- * that overlaps with it.
- */
- private boolean isPlanRangeUnique = false;
-
- public RemoteTsFileResource() {
- setStatus(TsFileResourceStatus.CLOSED);
- this.timeIndex = IoTDBDescriptor.getInstance().getConfig().getTimeIndexLevel().getTimeIndex();
- }
-
- private RemoteTsFileResource(TsFileResource other) throws IOException {
- super(other);
- withModification = new File(getModFile().getFilePath()).exists();
- setStatus(TsFileResourceStatus.CLOSED);
- }
-
- public RemoteTsFileResource(TsFileResource other, Node source) throws IOException {
- this(other);
- this.source = source;
- this.isRemote = true;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- if (!super.equals(o)) {
- return false;
- }
- RemoteTsFileResource that = (RemoteTsFileResource) o;
- return Objects.equals(source, that.source);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(super.hashCode(), source);
- }
-
- public void serialize(DataOutputStream dataOutputStream) {
- NodeSerializeUtils.serialize(source, dataOutputStream);
- try {
- // the path here is only for the remote node to get a download link, so it does not matter
- // if it is absolute
- SerializeUtils.serialize(getTsFile().getPath(), dataOutputStream);
-
- timeIndex.serialize(dataOutputStream);
- dataOutputStream.writeBoolean(withModification);
-
- dataOutputStream.writeLong(maxPlanIndex);
- dataOutputStream.writeLong(minPlanIndex);
-
- dataOutputStream.writeByte(isPlanRangeUnique ? 1 : 0);
- } catch (IOException ignored) {
- // unreachable
- }
- }
-
- public void deserialize(ByteBuffer buffer) {
- source = new Node();
- NodeSerializeUtils.deserialize(source, buffer);
- setFile(new File(SerializeUtils.deserializeString(buffer)));
-
- timeIndex =
- IoTDBDescriptor.getInstance()
- .getConfig()
- .getTimeIndexLevel()
- .getTimeIndex()
- .deserialize(buffer);
-
- withModification = buffer.get() == 1;
-
- maxPlanIndex = buffer.getLong();
- minPlanIndex = buffer.getLong();
-
- isPlanRangeUnique = buffer.get() == 1;
-
- isRemote = true;
- }
-
- public Node getSource() {
- return source;
- }
-
- public boolean isRemote() {
- return isRemote;
- }
-
- public void setRemote(boolean remote) {
- isRemote = remote;
- }
-
- public boolean isWithModification() {
- return withModification;
- }
-
- public boolean isPlanRangeUnique() {
- return isPlanRangeUnique;
- }
-
- public void setPlanRangeUnique(boolean planRangeUnique) {
- isPlanRangeUnique = planRangeUnique;
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/BaseFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/BaseFactory.java
deleted file mode 100644
index 1c60df6b8748..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/BaseFactory.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client;
-
-import org.apache.commons.pool2.KeyedPooledObjectFactory;
-import org.apache.commons.pool2.PooledObject;
-import org.apache.thrift.async.TAsyncClientManager;
-import org.apache.thrift.protocol.TProtocolFactory;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-public abstract class BaseFactory implements KeyedPooledObjectFactory {
-
- protected TAsyncClientManager[] managers;
- protected TProtocolFactory protocolFactory;
- protected AtomicInteger clientCnt = new AtomicInteger();
- protected ClientCategory category;
- protected IClientManager clientPoolManager;
-
- protected BaseFactory(TProtocolFactory protocolFactory, ClientCategory category) {
- this.protocolFactory = protocolFactory;
- this.category = category;
- }
-
- protected BaseFactory(
- TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) {
- this.protocolFactory = protocolFactory;
- this.category = category;
- this.clientPoolManager = clientManager;
- }
-
- @Override
- public void activateObject(K node, PooledObject pooledObject) throws Exception {}
-
- @Override
- public void passivateObject(K node, PooledObject pooledObject) throws Exception {}
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientCategory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientCategory.java
deleted file mode 100644
index fdd6669845a9..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientCategory.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client;
-
-public enum ClientCategory {
- META("MetaClient"),
- META_HEARTBEAT("MetaHeartbeatClient"),
- DATA("DataClient"),
- DATA_HEARTBEAT("DataHeartbeatClient"),
- DATA_ASYNC_APPEND_CLIENT("DataAsyncAppendClient");
-
- private final String name;
-
- ClientCategory(String name) {
- this.name = name;
- }
-
- public String getName() {
- return name;
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientManager.java
deleted file mode 100644
index ae24a6dea9e7..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientManager.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client;
-
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.rpc.thrift.RaftService;
-
-import com.google.common.collect.Maps;
-import org.apache.commons.pool2.KeyedObjectPool;
-import org.apache.thrift.transport.TTransportException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Map;
-
-/**
- * One should borrow the reusable client from this manager and return the client after use. The
- * underlying client pool is powered by Apache Commons Pool. The class provided 3 default pool group
- * according to current usage: RequestForwardClient, DataGroupClients, MetaGroupClients.
- *
- *
TODO: We can refine the client structure by reorg the interfaces defined in cluster-thrift.
- */
-public class ClientManager implements IClientManager {
-
- private static final Logger logger = LoggerFactory.getLogger(ClientManager.class);
-
- private Map> asyncClientPoolMap;
- private Map> syncClientPoolMap;
- private final ClientPoolFactory clientPoolFactory;
-
- /**
- * {@link ClientManager.Type#RequestForwardClient} represents the clients used to forward external
- * client requests to proper node to handle such as query, insert request.
- *
- *
{@link ClientManager.Type#DataGroupClient} represents the clients used to appendEntry,
- * appendEntries, sendHeartbeat, etc for data raft group.
- *
- *
{@link ClientManager.Type#MetaGroupClient} represents the clients used to appendEntry,
- * appendEntries, sendHeartbeat, etc for meta raft group. *
- */
- public enum Type {
- RequestForwardClient,
- DataGroupClient,
- MetaGroupClient
- }
-
- public ClientManager(boolean isAsyncMode, Type type) {
- clientPoolFactory = new ClientPoolFactory();
- clientPoolFactory.setClientManager(this);
- if (isAsyncMode) {
- asyncClientPoolMap = Maps.newHashMap();
- constructAsyncClientMap(type);
- } else {
- syncClientPoolMap = Maps.newHashMap();
- constructSyncClientMap(type);
- }
- }
-
- private void constructAsyncClientMap(Type type) {
- switch (type) {
- case RequestForwardClient:
- asyncClientPoolMap.put(
- ClientCategory.DATA, clientPoolFactory.createAsyncDataPool(ClientCategory.DATA));
- break;
- case MetaGroupClient:
- asyncClientPoolMap.put(
- ClientCategory.META, clientPoolFactory.createAsyncMetaPool(ClientCategory.META));
- asyncClientPoolMap.put(
- ClientCategory.META_HEARTBEAT,
- clientPoolFactory.createAsyncMetaPool(ClientCategory.META_HEARTBEAT));
- break;
- case DataGroupClient:
- asyncClientPoolMap.put(
- ClientCategory.DATA, clientPoolFactory.createAsyncDataPool(ClientCategory.DATA));
- asyncClientPoolMap.put(
- ClientCategory.DATA_HEARTBEAT,
- clientPoolFactory.createAsyncDataPool(ClientCategory.DATA_HEARTBEAT));
- asyncClientPoolMap.put(
- ClientCategory.DATA_ASYNC_APPEND_CLIENT,
- clientPoolFactory.createSingleManagerAsyncDataPool());
- break;
- default:
- logger.warn("unsupported ClientManager type: {}", type);
- break;
- }
- }
-
- private void constructSyncClientMap(Type type) {
- switch (type) {
- case RequestForwardClient:
- syncClientPoolMap.put(
- ClientCategory.DATA, clientPoolFactory.createSyncDataPool(ClientCategory.DATA));
- break;
- case MetaGroupClient:
- syncClientPoolMap.put(
- ClientCategory.META, clientPoolFactory.createSyncMetaPool(ClientCategory.META));
- syncClientPoolMap.put(
- ClientCategory.META_HEARTBEAT,
- clientPoolFactory.createSyncMetaPool(ClientCategory.META_HEARTBEAT));
- break;
- case DataGroupClient:
- syncClientPoolMap.put(
- ClientCategory.DATA, clientPoolFactory.createSyncDataPool(ClientCategory.DATA));
- syncClientPoolMap.put(
- ClientCategory.DATA_HEARTBEAT,
- clientPoolFactory.createSyncDataPool(ClientCategory.DATA_HEARTBEAT));
- break;
- default:
- logger.warn("unsupported ClientManager type: {}", type);
- break;
- }
- }
-
- /**
- * It's safe to convert: 1. RaftService.AsyncClient to TSDataService.AsyncClient when category is
- * DATA or DATA_HEARTBEAT; 2. RaftService.AsyncClient to TSMetaService.AsyncClient when category
- * is META or META_HEARTBEAT.
- *
- * @return RaftService.AsyncClient
- */
- @Override
- public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category)
- throws IOException {
- KeyedObjectPool pool;
- RaftService.AsyncClient client = null;
- if (asyncClientPoolMap != null && (pool = asyncClientPoolMap.get(category)) != null) {
- try {
- client = pool.borrowObject(node);
- } catch (IOException e) {
- // external needs the IOException to check connection
- throw e;
- } catch (Exception e) {
- // external doesn't care of other exceptions
- logger.error("BorrowAsyncClient fail.", e);
- }
- } else {
- logger.warn(
- "BorrowSyncClient invoke on unsupported mode or category: Node:{}, ClientCategory:{}, "
- + "isSyncMode:{}",
- node,
- clientPoolFactory,
- syncClientPoolMap != null);
- }
- return client;
- }
-
- /**
- * It's safe to convert: 1. RaftService.Client to TSDataService.Client when category is DATA or
- * DATA_HEARTBEAT; 2. RaftService.Client to TSMetaService.Client when category is META or
- * META_HEARTBEAT.
- *
- * @return RaftService.Client
- */
- @Override
- public RaftService.Client borrowSyncClient(Node node, ClientCategory category)
- throws IOException {
- KeyedObjectPool pool;
- RaftService.Client client = null;
- if (syncClientPoolMap != null && (pool = syncClientPoolMap.get(category)) != null) {
- try {
- client = pool.borrowObject(node);
- } catch (TTransportException e) {
- // external needs to check transport related exception
- throw new IOException(e);
- } catch (IOException e) {
- // external needs the IOException to check connection
- throw e;
- } catch (Exception e) {
- // external doesn't care of other exceptions
- logger.error("BorrowSyncClient fail.", e);
- }
- } else {
- logger.warn(
- "BorrowSyncClient invoke on unsupported mode or category: Node:{}, ClientCategory:{}, "
- + "isSyncMode:{}",
- node,
- clientPoolFactory,
- syncClientPoolMap != null);
- }
- return client;
- }
-
- @Override
- public void returnAsyncClient(
- RaftService.AsyncClient client, Node node, ClientCategory category) {
- if (client != null && node != null) {
- try {
- asyncClientPoolMap.get(category).returnObject(node, client);
- } catch (Exception e) {
- logger.error("AsyncClient return error: {}", client, e);
- }
- }
- }
-
- @Override
- public void returnSyncClient(RaftService.Client client, Node node, ClientCategory category) {
- if (client != null && node != null) {
- try {
- syncClientPoolMap.get(category).returnObject(node, client);
- } catch (Exception e) {
- logger.error("SyncClient return error: {}", client, e);
- }
- }
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientPoolFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientPoolFactory.java
deleted file mode 100644
index 00db59adcbca..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientPoolFactory.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client;
-
-import org.apache.iotdb.cluster.client.async.AsyncDataClient;
-import org.apache.iotdb.cluster.client.async.AsyncMetaClient;
-import org.apache.iotdb.cluster.client.sync.SyncDataClient;
-import org.apache.iotdb.cluster.client.sync.SyncMetaClient;
-import org.apache.iotdb.cluster.config.ClusterConfig;
-import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.rpc.thrift.RaftService;
-
-import org.apache.commons.pool2.impl.GenericKeyedObjectPool;
-import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-
-import java.time.Duration;
-
-public class ClientPoolFactory {
-
- protected long waitClientTimeoutMS;
- protected int maxConnectionForEachNode;
- protected int maxIdleConnectionForEachNode;
- private final TProtocolFactory protocolFactory;
- private GenericKeyedObjectPoolConfig poolConfig;
- private IClientManager clientManager;
-
- public ClientPoolFactory() {
- ClusterConfig config = ClusterDescriptor.getInstance().getConfig();
- this.waitClientTimeoutMS = config.getWaitClientTimeoutMS();
- this.maxConnectionForEachNode = config.getMaxClientPerNodePerMember();
- this.maxIdleConnectionForEachNode = config.getMaxIdleClientPerNodePerMember();
- protocolFactory =
- config.isRpcThriftCompressionEnabled()
- ? new TCompactProtocol.Factory()
- : new TBinaryProtocol.Factory();
- poolConfig = new GenericKeyedObjectPoolConfig();
- poolConfig.setMaxTotalPerKey(maxConnectionForEachNode);
- poolConfig.setMaxIdlePerKey(maxIdleConnectionForEachNode);
- poolConfig.setMaxWait(Duration.ofMillis(waitClientTimeoutMS));
- poolConfig.setTestOnReturn(true);
- poolConfig.setTestOnBorrow(true);
- }
-
- public void setClientManager(IClientManager clientManager) {
- this.clientManager = clientManager;
- }
-
- public GenericKeyedObjectPool createSyncDataPool(
- ClientCategory category) {
- return new GenericKeyedObjectPool<>(
- new SyncDataClient.SyncDataClientFactory(protocolFactory, category, clientManager),
- poolConfig);
- }
-
- public GenericKeyedObjectPool createSyncMetaPool(
- ClientCategory category) {
- return new GenericKeyedObjectPool<>(
- new SyncMetaClient.SyncMetaClientFactory(protocolFactory, category, clientManager),
- poolConfig);
- }
-
- public GenericKeyedObjectPool createAsyncDataPool(
- ClientCategory category) {
- return new GenericKeyedObjectPool<>(
- new AsyncDataClient.AsyncDataClientFactory(protocolFactory, category, clientManager),
- poolConfig);
- }
-
- public GenericKeyedObjectPool createAsyncMetaPool(
- ClientCategory category) {
- return new GenericKeyedObjectPool<>(
- new AsyncMetaClient.AsyncMetaClientFactory(protocolFactory, category, clientManager),
- poolConfig);
- }
-
- public GenericKeyedObjectPool createSingleManagerAsyncDataPool() {
- return new GenericKeyedObjectPool<>(
- new AsyncDataClient.SingleManagerFactory(protocolFactory, clientManager), poolConfig);
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/IClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/IClientManager.java
deleted file mode 100644
index 6652e0c6e7c0..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/IClientManager.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client;
-
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.rpc.thrift.RaftService;
-
-import java.io.IOException;
-
-public interface IClientManager {
- RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) throws IOException;
-
- RaftService.Client borrowSyncClient(Node node, ClientCategory category) throws IOException;
-
- void returnAsyncClient(RaftService.AsyncClient client, Node node, ClientCategory category);
-
- void returnSyncClient(RaftService.Client client, Node node, ClientCategory category);
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncBaseFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncBaseFactory.java
deleted file mode 100644
index b2b84153a69e..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncBaseFactory.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client.async;
-
-import org.apache.iotdb.cluster.client.BaseFactory;
-import org.apache.iotdb.cluster.client.ClientCategory;
-import org.apache.iotdb.cluster.client.IClientManager;
-import org.apache.iotdb.cluster.config.ClusterDescriptor;
-import org.apache.iotdb.cluster.rpc.thrift.RaftService;
-
-import org.apache.thrift.async.TAsyncClientManager;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-public abstract class AsyncBaseFactory
- extends BaseFactory {
-
- private static final Logger logger = LoggerFactory.getLogger(AsyncBaseFactory.class);
-
- protected AsyncBaseFactory(TProtocolFactory protocolFactory, ClientCategory category) {
- super(protocolFactory, category);
- managers =
- new TAsyncClientManager
- [ClusterDescriptor.getInstance().getConfig().getSelectorNumOfClientPool()];
- for (int i = 0; i < managers.length; i++) {
- try {
- managers[i] = new TAsyncClientManager();
- } catch (IOException e) {
- logger.error("Cannot create data heartbeat client manager for factory", e);
- }
- }
- }
-
- protected AsyncBaseFactory(
- TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) {
- super(protocolFactory, category, clientManager);
- managers =
- new TAsyncClientManager
- [ClusterDescriptor.getInstance().getConfig().getSelectorNumOfClientPool()];
- for (int i = 0; i < managers.length; i++) {
- try {
- managers[i] = new TAsyncClientManager();
- } catch (IOException e) {
- logger.error("Cannot create data heartbeat client manager for factory", e);
- }
- }
- }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
deleted file mode 100644
index ebfb7ded5a4d..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.client.async;
-
-import org.apache.iotdb.cluster.client.BaseFactory;
-import org.apache.iotdb.cluster.client.ClientCategory;
-import org.apache.iotdb.cluster.client.IClientManager;
-import org.apache.iotdb.cluster.config.ClusterConstant;
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.rpc.thrift.TSDataService;
-import org.apache.iotdb.cluster.utils.ClientUtils;
-import org.apache.iotdb.commons.utils.TestOnly;
-import org.apache.iotdb.rpc.TNonblockingSocketWrapper;
-
-import org.apache.commons.pool2.PooledObject;
-import org.apache.commons.pool2.impl.DefaultPooledObject;
-import org.apache.thrift.async.TAsyncClientManager;
-import org.apache.thrift.async.TAsyncMethodCall;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.transport.TNonblockingTransport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Notice: Because a client will be returned to a pool immediately after a successful request, you
- * should not cache it anywhere else.
- */
-public class AsyncDataClient extends TSDataService.AsyncClient {
-
- private static final Logger logger = LoggerFactory.getLogger(AsyncDataClient.class);
-
- private Node node;
- private ClientCategory category;
- private IClientManager clientManager;
-
- @TestOnly
- public AsyncDataClient(
- TProtocolFactory protocolFactory,
- TAsyncClientManager clientManager,
- TNonblockingTransport transport) {
- super(protocolFactory, clientManager, transport);
- }
-
- public AsyncDataClient(
- TProtocolFactory protocolFactory,
- TAsyncClientManager tClientManager,
- Node node,
- ClientCategory category)
- throws IOException {
- // the difference of the two clients lies in the port
- super(
- protocolFactory,
- tClientManager,
- TNonblockingSocketWrapper.wrap(
- node.getInternalIp(),
- ClientUtils.getPort(node, category),
- ClusterConstant.getConnectionTimeoutInMS()));
- this.node = node;
- this.category = category;
- }
-
- public AsyncDataClient(
- TProtocolFactory protocolFactory,
- TAsyncClientManager tClientManager,
- Node node,
- ClientCategory category,
- IClientManager manager)
- throws IOException {
- this(protocolFactory, tClientManager, node, category);
- this.clientManager = manager;
- }
-
- public void close() {
- ___transport.close();
- ___currentMethod = null;
- }
-
- public boolean isValid() {
- return ___transport != null;
- }
-
- /**
- * return self if clientPool is not null, the method doesn't need to call by user, it will trigger
- * once client transport complete.
- */
- private void returnSelf() {
- if (clientManager != null) {
- clientManager.returnAsyncClient(this, node, category);
- }
- }
-
- @Override
- public void onComplete() {
- super.onComplete();
- returnSelf();
- }
-
- @Override
- public String toString() {
- return "Async"
- + category.getName()
- + "{"
- + "node="
- + node
- + ","
- + "port="
- + ClientUtils.getPort(node, category)
- + '}';
- }
-
- public Node getNode() {
- return node;
- }
-
- public boolean isReady() {
- try {
- checkReady();
- return true;
- } catch (Exception e) {
- return false;
- }
- }
-
- @TestOnly
- TAsyncMethodCall