From 9f878d9b30a98b0d59d462974e9728a02c49f18f Mon Sep 17 00:00:00 2001
From: Nikolay <nizhikov@apache.org>
Date: Fri, 20 Oct 2023 15:03:02 +0300
Subject: [PATCH] IGNITE-20392 Move cassandra modules to extensions (#11007)

---
 assembly/dependencies-apache-ignite-lgpl.xml  |    1 -
 assembly/dependencies-apache-ignite-slim.xml  |    3 -
 assembly/dependencies-apache-ignite.xml       |    1 -
 assembly/libs/README.txt                      |    1 -
 modules/cassandra/README.txt                  |   28 -
 modules/cassandra/pom.xml                     |   72 -
 modules/cassandra/serializers/README.txt      |   34 -
 .../serializers/licenses/apache-2.0.txt       |  202 ---
 modules/cassandra/serializers/pom.xml         |  141 --
 .../cassandra/serializer/KryoSerializer.java  |   93 --
 .../cassandra/serializer/package-info.java    |   22 -
 .../ignite/tests/KryoSerializerTest.java      |   68 -
 .../java/org/apache/ignite/tests/MyPojo.java  |   97 --
 .../apache/ignite/tests/SerializerSuite.java  |   29 -
 modules/cassandra/store/README.txt            |   32 -
 .../cassandra/store/licenses/apache-2.0.txt   |  202 ---
 modules/cassandra/store/pom.xml               |  333 ----
 .../store/cassandra/CassandraCacheStore.java  |  546 ------
 .../cassandra/CassandraCacheStoreFactory.java |  201 ---
 .../cassandra/common/CassandraHelper.java     |  182 --
 .../common/PropertyMappingHelper.java         |  239 ---
 .../store/cassandra/common/RandomSleeper.java |  105 --
 .../store/cassandra/common/SystemHelper.java  |   46 -
 .../store/cassandra/common/package-info.java  |   22 -
 .../cassandra/datasource/Credentials.java     |   39 -
 .../cassandra/datasource/DataSource.java      |  658 --------
 .../datasource/PlainCredentials.java          |   53 -
 .../cassandra/datasource/package-info.java    |   22 -
 .../cache/store/cassandra/package-info.java   |   22 -
 .../persistence/KeyPersistenceSettings.java   |  249 ---
 .../KeyValuePersistenceSettings.java          |  531 ------
 .../persistence/PersistenceController.java    |  459 -----
 .../persistence/PersistenceSettings.java      |  557 -------
 .../persistence/PersistenceStrategy.java      |   62 -
 .../cassandra/persistence/PojoField.java      |  238 ---
 .../persistence/PojoFieldAccessor.java        |  161 --
 .../cassandra/persistence/PojoKeyField.java   |   97 --
 .../cassandra/persistence/PojoValueField.java |  157 --
 .../persistence/ValuePersistenceSettings.java |  100 --
 .../cassandra/persistence/package-info.java   |   22 -
 .../cassandra/serializer/JavaSerializer.java  |   80 -
 .../cassandra/serializer/Serializer.java      |   43 -
 .../cassandra/serializer/package-info.java    |   22 -
 .../session/BatchExecutionAssistant.java      |  102 --
 .../session/BatchLoaderAssistant.java         |   47 -
 .../cassandra/session/CassandraSession.java   |   69 -
 .../session/CassandraSessionImpl.java         | 1030 ------------
 .../cassandra/session/ExecutionAssistant.java |   84 -
 .../GenericBatchExecutionAssistant.java       |   71 -
 .../session/LoadCacheCustomQueryWorker.java   |  121 --
 .../session/WrappedPreparedStatement.java     |  180 --
 .../cassandra/session/WrappedSession.java     |   91 -
 .../store/cassandra/session/package-info.java |   22 -
 .../cassandra/session/pool/IdleSession.java   |   73 -
 .../cassandra/session/pool/SessionPool.java   |  174 --
 .../cassandra/session/pool/package-info.java  |   22 -
 .../session/transaction/BaseMutation.java     |   68 -
 .../session/transaction/DeleteMutation.java   |   57 -
 .../session/transaction/Mutation.java         |   63 -
 .../session/transaction/WriteMutation.java    |   58 -
 .../session/transaction/package-info.java     |   22 -
 .../store/cassandra/utils/DDLGenerator.java   |   83 -
 .../store/cassandra/utils/package-info.java   |   22 -
 .../store/src/test/bootstrap/aws/README.txt   |   13 -
 .../aws/cassandra/cassandra-bootstrap.sh      |  336 ----
 .../bootstrap/aws/cassandra/cassandra-env.sh  |  287 ----
 .../aws/cassandra/cassandra-start.sh          |  217 ---
 .../aws/cassandra/cassandra-template.yaml     |  888 ----------
 .../store/src/test/bootstrap/aws/common.sh    | 1481 -----------------
 .../store/src/test/bootstrap/aws/env.sh       |  113 --
 .../test/bootstrap/aws/ganglia/agent-start.sh |   75 -
 .../aws/ganglia/ganglia-bootstrap.sh          |  417 -----
 .../bootstrap/aws/ignite/ignite-bootstrap.sh  |  336 ----
 .../ignite-cassandra-server-template.xml      |  181 --
 .../test/bootstrap/aws/ignite/ignite-env.sh   |   29 -
 .../test/bootstrap/aws/ignite/ignite-start.sh |  266 ---
 .../src/test/bootstrap/aws/logs-collector.sh  |  173 --
 .../ignite-cassandra-client-template.xml      |  183 --
 .../bootstrap/aws/tests/tests-bootstrap.sh    |  317 ----
 .../test/bootstrap/aws/tests/tests-manager.sh |  458 -----
 .../test/bootstrap/aws/tests/tests-report.sh  |  499 ------
 .../ignite/tests/CassandraConfigTest.java     |  141 --
 .../CassandraDirectPersistenceLoadTest.java   |  108 --
 .../tests/CassandraDirectPersistenceTest.java |  767 ---------
 .../ignite/tests/CassandraLocalServer.java    |   60 -
 .../tests/CassandraSessionImplTest.java       |  211 ---
 .../apache/ignite/tests/DDLGeneratorTest.java |   60 -
 .../tests/DatasourceSerializationTest.java    |  155 --
 .../tests/IgnitePersistentStoreLoadTest.java  |  112 --
 .../IgnitePersistentStorePrimitiveTest.java   |  143 --
 .../tests/IgnitePersistentStoreTest.java      |  920 ----------
 .../LoadTestsCassandraArtifactsCreator.java   |  107 --
 .../apache/ignite/tests/load/Generator.java   |   27 -
 .../ignite/tests/load/IntGenerator.java       |   33 -
 .../ignite/tests/load/LoadTestDriver.java     |  238 ---
 .../ignite/tests/load/LongGenerator.java      |   28 -
 .../ignite/tests/load/PersonGenerator.java    |   45 -
 .../ignite/tests/load/PersonIdGenerator.java  |   31 -
 .../ignite/tests/load/StringGenerator.java    |   28 -
 .../org/apache/ignite/tests/load/Worker.java  |  432 -----
 .../tests/load/cassandra/BulkReadWorker.java  |   63 -
 .../tests/load/cassandra/BulkWriteWorker.java |   52 -
 .../tests/load/cassandra/ReadWorker.java      |   51 -
 .../tests/load/cassandra/WriteWorker.java     |   51 -
 .../tests/load/cassandra/package-info.java    |   22 -
 .../tests/load/ignite/BulkReadWorker.java     |   52 -
 .../tests/load/ignite/BulkWriteWorker.java    |   52 -
 .../ignite/tests/load/ignite/ReadWorker.java  |   51 -
 .../ignite/tests/load/ignite/WriteWorker.java |   51 -
 .../tests/load/ignite/package-info.java       |   22 -
 .../ignite/tests/load/package-info.java       |   22 -
 .../org/apache/ignite/tests/package-info.java |   22 -
 .../org/apache/ignite/tests/pojos/Person.java |  261 ---
 .../apache/ignite/tests/pojos/PersonId.java   |  103 --
 .../apache/ignite/tests/pojos/Product.java    |  116 --
 .../ignite/tests/pojos/ProductOrder.java      |  142 --
 .../ignite/tests/pojos/SimplePerson.java      |  184 --
 .../ignite/tests/pojos/SimplePersonId.java    |   87 -
 .../ignite/tests/pojos/package-info.java      |   22 -
 .../ignite/tests/utils/CacheStoreHelper.java  |   78 -
 .../utils/CassandraAdminCredentials.java      |   38 -
 .../ignite/tests/utils/CassandraHelper.java   |  366 ----
 .../tests/utils/CassandraLifeCycleBean.java   |  150 --
 .../utils/CassandraRegularCredentials.java    |   38 -
 .../ignite/tests/utils/TestCacheSession.java  |   94 --
 .../ignite/tests/utils/TestTransaction.java   |  157 --
 .../ignite/tests/utils/TestsHelper.java       |  752 ---------
 .../ignite/tests/utils/package-info.java      |   22 -
 .../store/IgniteCassandraStoreTestSuite.java  |   82 -
 .../cassandra/store/package-info.java         |   21 -
 .../src/test/resources/log4j2.properties      |  178 --
 .../tests/cassandra/connection-settings.xml   |   52 -
 .../tests/cassandra/connection.properties     |   17 -
 .../tests/cassandra/credentials.properties    |   22 -
 .../tests/cassandra/embedded-cassandra.yaml   |  120 --
 .../tests/cassandra/keyspaces.properties      |   17 -
 .../tests/persistence/blob/ignite-config.xml  |   96 --
 .../blob/persistence-settings-1.xml           |   21 -
 .../blob/persistence-settings-2.xml           |   21 -
 .../blob/persistence-settings-3.xml           |   29 -
 .../loadall_blob/ignite-config.xml            |   90 -
 .../loadall_blob/persistence-settings.xml     |   29 -
 .../tests/persistence/pojo/ignite-config.xml  |  212 ---
 .../ignite/tests/persistence/pojo/order.xml   |   21 -
 .../pojo/persistence-settings-1.xml           |   21 -
 .../pojo/persistence-settings-2.xml           |   21 -
 .../pojo/persistence-settings-3.xml           |  175 --
 .../pojo/persistence-settings-4.xml           |  175 --
 .../pojo/persistence-settings-5.xml           |   21 -
 .../pojo/persistence-settings-6.xml           |  174 --
 .../ignite/tests/persistence/pojo/product.xml |   21 -
 .../persistence/primitive/ignite-config.xml   |   96 --
 .../primitive/ignite-remote-client-config.xml |   99 --
 .../primitive/ignite-remote-server-config.xml |  110 --
 .../primitive/persistence-settings-1.xml      |   21 -
 .../primitive/persistence-settings-2.xml      |   21 -
 .../store/src/test/resources/tests.properties |   65 -
 .../src/test/scripts/cassandra-load-tests.bat |   41 -
 .../src/test/scripts/cassandra-load-tests.sh  |   39 -
 .../src/test/scripts/ignite-load-tests.bat    |   41 -
 .../src/test/scripts/ignite-load-tests.sh     |   39 -
 .../store/src/test/scripts/jvm-opt.sh         |   21 -
 .../store/src/test/scripts/jvm-opts.bat       |   24 -
 .../scripts/recreate-cassandra-artifacts.bat  |   41 -
 .../scripts/recreate-cassandra-artifacts.sh   |   39 -
 .../ignite-modules-test/build.gradle          |    6 -
 pom.xml                                       |    1 -
 167 files changed, 24107 deletions(-)
 delete mode 100644 modules/cassandra/README.txt
 delete mode 100644 modules/cassandra/pom.xml
 delete mode 100644 modules/cassandra/serializers/README.txt
 delete mode 100644 modules/cassandra/serializers/licenses/apache-2.0.txt
 delete mode 100644 modules/cassandra/serializers/pom.xml
 delete mode 100644 modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
 delete mode 100644 modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
 delete mode 100644 modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java
 delete mode 100644 modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java
 delete mode 100644 modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/SerializerSuite.java
 delete mode 100644 modules/cassandra/store/README.txt
 delete mode 100644 modules/cassandra/store/licenses/apache-2.0.txt
 delete mode 100644 modules/cassandra/store/pom.xml
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoFieldAccessor.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedPreparedStatement.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedSession.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/IdleSession.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
 delete mode 100644 modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/README.txt
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/common.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/env.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh
 delete mode 100644 modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraConfigTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraSessionImplTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStorePrimitiveTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Generator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LongGenerator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/StringGenerator.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/PersonId.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePerson.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePersonId.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/IgniteCassandraStoreTestSuite.java
 delete mode 100644 modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/package-info.java
 delete mode 100644 modules/cassandra/store/src/test/resources/log4j2.properties
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/persistence-settings.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml
 delete mode 100644 modules/cassandra/store/src/test/resources/tests.properties
 delete mode 100644 modules/cassandra/store/src/test/scripts/cassandra-load-tests.bat
 delete mode 100644 modules/cassandra/store/src/test/scripts/cassandra-load-tests.sh
 delete mode 100644 modules/cassandra/store/src/test/scripts/ignite-load-tests.bat
 delete mode 100644 modules/cassandra/store/src/test/scripts/ignite-load-tests.sh
 delete mode 100644 modules/cassandra/store/src/test/scripts/jvm-opt.sh
 delete mode 100644 modules/cassandra/store/src/test/scripts/jvm-opts.bat
 delete mode 100644 modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat
 delete mode 100644 modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh
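
With this change the Cassandra integration is maintained in the Apache Ignite Extensions repository (https://github.com/apache/ignite-extensions) rather than in the core tree. A minimal dependency sketch for projects that previously pulled the store from core, assuming the artifact keeps the ignite-cassandra-store ID after the move and using a placeholder version property:

    <dependency>
        <groupId>org.apache.ignite</groupId>
        <artifactId>ignite-cassandra-store</artifactId>
        <!-- Placeholder: use the version published from ignite-extensions,
             which is released independently of the core Ignite version. -->
        <version>${ignite-cassandra-ext.version}</version>
    </dependency>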

diff --git a/assembly/dependencies-apache-ignite-lgpl.xml b/assembly/dependencies-apache-ignite-lgpl.xml
index 40fbedf4f27a1..cb8812ba82d3a 100644
--- a/assembly/dependencies-apache-ignite-lgpl.xml
+++ b/assembly/dependencies-apache-ignite-lgpl.xml
@@ -127,7 +127,6 @@
                 <exclude>${project.groupId}:ignite-codegen</exclude>
                 <exclude>${project.groupId}:ignite-appserver-test</exclude>
                 <exclude>${project.groupId}:ignite-websphere-test</exclude>
-                <exclude>${project.groupId}:ignite-cassandra</exclude>
                 <exclude>${project.groupId}:ignite-yardstick</exclude>
                 <exclude>${project.groupId}:ignite-benchmarks</exclude>
                 <exclude>${project.groupId}:ignite-dev-utils</exclude>
diff --git a/assembly/dependencies-apache-ignite-slim.xml b/assembly/dependencies-apache-ignite-slim.xml
index 2604925c5c752..0d40e3e520ba8 100644
--- a/assembly/dependencies-apache-ignite-slim.xml
+++ b/assembly/dependencies-apache-ignite-slim.xml
@@ -128,7 +128,6 @@
                 <exclude>${project.groupId}:ignite-schedule</exclude>
                 <exclude>${project.groupId}:ignite-appserver-test</exclude>
                 <exclude>${project.groupId}:ignite-websphere-test</exclude>
-                <exclude>${project.groupId}:ignite-cassandra</exclude>
                 <exclude>${project.groupId}:ignite-yardstick</exclude>
                 <exclude>${project.groupId}:ignite-benchmarks</exclude>
                 <exclude>${project.groupId}:ignite-dev-utils</exclude>
@@ -136,8 +135,6 @@
                 <exclude>${project.groupId}:ignite-compatibility</exclude>
                 <exclude>${project.groupId}:ignite-sqlline</exclude>
                 <!-- Removed from slim packaging are: -->
-                <exclude>${project.groupId}:ignite-cassandra-serializers</exclude>
-                <exclude>${project.groupId}:ignite-cassandra-store</exclude>
                 <exclude>${project.groupId}:ignite-direct-io</exclude>
                 <exclude>${project.groupId}:ignite-jcl</exclude>
                 <exclude>${project.groupId}:ignite-web</exclude>
diff --git a/assembly/dependencies-apache-ignite.xml b/assembly/dependencies-apache-ignite.xml
index ab81d967cc020..c6c06c48549e6 100644
--- a/assembly/dependencies-apache-ignite.xml
+++ b/assembly/dependencies-apache-ignite.xml
@@ -128,7 +128,6 @@
                 <exclude>${project.groupId}:ignite-schedule</exclude>
                 <exclude>${project.groupId}:ignite-appserver-test</exclude>
                 <exclude>${project.groupId}:ignite-websphere-test</exclude>
-                <exclude>${project.groupId}:ignite-cassandra</exclude>
                 <exclude>${project.groupId}:ignite-yardstick</exclude>
                 <exclude>${project.groupId}:ignite-benchmarks</exclude>
                 <exclude>${project.groupId}:ignite-dev-utils</exclude>
diff --git a/assembly/libs/README.txt b/assembly/libs/README.txt
index 6dac19fe96e4a..760fe6c39b461 100644
--- a/assembly/libs/README.txt
+++ b/assembly/libs/README.txt
@@ -71,7 +71,6 @@ Here is how it can be imported into your POM file:
 All optional modules can be imported just like the core module, but with different artifact IDs.
 
 The following modules are available:
-- ignite-cassandra (for Apache Cassandra integration)
 - ignite-indexing (for SQL querying and indexing)
 - ignite-jcl (for Apache Commons logging)
 - ignite-jta (for XA integration)
diff --git a/modules/cassandra/README.txt b/modules/cassandra/README.txt
deleted file mode 100644
index 146e5d44465fb..0000000000000
--- a/modules/cassandra/README.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Apache Ignite Cassandra Module
-------------------------
-
-Apache Ignite Cassandra module, used just as a parent container for other Cassandra related modules.
-
-Importing Cassandra Module In Maven Project
--------------------------------------
-
-If you are using Maven to manage dependencies of your project, you can add Cassandra Store module
-dependency like this (replace '${ignite.version}' with actual Ignite version you are
-interested in):
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-                        http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    ...
-    <dependencies>
-        ...
-        <dependency>
-            <groupId>org.apache.ignite</groupId>
-            <artifactId>ignite-cassandra</artifactId>
-            <version>${ignite.version}</version>
-        </dependency>
-        ...
-    </dependencies>
-    ...
-</project>
diff --git a/modules/cassandra/pom.xml b/modules/cassandra/pom.xml
deleted file mode 100644
index 4652db62e061f..0000000000000
--- a/modules/cassandra/pom.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    POM file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.ignite</groupId>
-        <artifactId>ignite-parent-internal</artifactId>
-        <version>${revision}</version>
-        <relativePath>../../parent-internal/pom.xml</relativePath>
-    </parent>
-
-    <artifactId>ignite-cassandra</artifactId>
-    <packaging>pom</packaging>
-
-    <url>http://ignite.apache.org</url>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>ignite-cassandra-store</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>org.mockito</groupId>
-                <artifactId>mockito-core</artifactId>
-                <version>${mockito.version}</version>
-                <scope>test</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
-    <modules>
-        <module>store</module>
-        <module>serializers</module>
-    </modules>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-deploy-plugin</artifactId>
-                <version>2.8.2</version>
-                <inherited>false</inherited>
-                <configuration>
-                    <skip>true</skip>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/modules/cassandra/serializers/README.txt b/modules/cassandra/serializers/README.txt
deleted file mode 100644
index aaa309fae2972..0000000000000
--- a/modules/cassandra/serializers/README.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Apache Ignite Cassandra Serializers Module
-------------------------
-
-Apache Ignite Cassandra Serializers module provides additional serializers to store objects as BLOBs in Cassandra. The
-module could be used as an addition to Ignite Cassandra Store module.
-
-To enable Cassandra Serializers module when starting a standalone node, move 'optional/ignite-cassandra-serializers'
-folder to 'libs' folder before running 'ignite.{sh|bat}' script. The content of the module folder will be added to
-classpath in this case. Note, copying folder 'optional/ignite-cassandra-serializers' requires copying
-'optional/ignite-cassandra-store' folder.
-
-Importing Cassandra Serializers Module In Maven Project
--------------------------------------
-
-If you are using Maven to manage dependencies of your project, you can add Cassandra Store module
-dependency like this (replace '${ignite.version}' with actual Ignite version you are
-interested in):
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-                        http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    ...
-    <dependencies>
-        ...
-        <dependency>
-            <groupId>org.apache.ignite</groupId>
-            <artifactId>ignite-cassandra-serializers</artifactId>
-            <version>${ignite.version}</version>
-        </dependency>
-        ...
-    </dependencies>
-    ...
-</project>
diff --git a/modules/cassandra/serializers/licenses/apache-2.0.txt b/modules/cassandra/serializers/licenses/apache-2.0.txt
deleted file mode 100644
index d645695673349..0000000000000
--- a/modules/cassandra/serializers/licenses/apache-2.0.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/modules/cassandra/serializers/pom.xml b/modules/cassandra/serializers/pom.xml
deleted file mode 100644
index e00d461bab988..0000000000000
--- a/modules/cassandra/serializers/pom.xml
+++ /dev/null
@@ -1,141 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    POM file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.ignite</groupId>
-        <artifactId>ignite-cassandra</artifactId>
-        <version>${revision}</version>
-        <relativePath>../pom.xml</relativePath>
-    </parent>
-
-    <artifactId>ignite-cassandra-serializers</artifactId>
-
-    <url>http://ignite.apache.org</url>
-
-    <properties>
-        <kryo.version>3.0.3</kryo.version>
-        <reflectasm.version>1.10.1</reflectasm.version>
-        <minlog.version>1.3.0</minlog.version>
-        <asm.version>5.0.3</asm.version>
-        <objenesis.version>2.1</objenesis.version>
-    </properties>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-cassandra-store</artifactId>
-        </dependency>
-
-        <!-- Kryo and required dependencies -->
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>kryo</artifactId>
-            <version>${kryo.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>reflectasm</artifactId>
-            <version>${reflectasm.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>minlog</artifactId>
-            <version>${minlog.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.ow2.asm</groupId>
-            <artifactId>asm</artifactId>
-            <version>${asm.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.objenesis</groupId>
-            <artifactId>objenesis</artifactId>
-            <version>${objenesis.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-tools</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.7.0</version>
-                <configuration>
-                    <encoding>UTF-8</encoding>
-                    <fork>true</fork>
-                    <debug>false</debug>
-                    <debuglevel>lines,vars,source</debuglevel>
-                    <meminitial>256</meminitial>
-                    <maxmem>512</maxmem>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-dependency-plugin</artifactId>
-                <version>2.10</version>
-                <executions>
-                    <execution>
-                        <id>copy-main-dependencies</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>copy-dependencies</goal>
-                        </goals>
-                        <configuration>
-                            <outputDirectory>${project.build.directory}/libs</outputDirectory>
-                            <overWriteReleases>false</overWriteReleases>
-                            <overWriteSnapshots>false</overWriteSnapshots>
-                            <overWriteIfNewer>true</overWriteIfNewer>
-                            <excludeTransitive>true</excludeTransitive>
-                            <excludeArtifactIds>
-                                ignite-cassandra-store
-                            </excludeArtifactIds>
-                            <includeScope>runtime</includeScope>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-deploy-plugin</artifactId>
-                <version>2.8.2</version>
-                <configuration>
-                    <skip>false</skip>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java b/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
deleted file mode 100644
index 9de841b5533ba..0000000000000
--- a/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/KryoSerializer.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.serializer;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.nio.ByteBuffer;
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.io.Input;
-import com.esotericsoftware.kryo.io.Output;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Serializer based on Kryo serialization.
- */
-public class KryoSerializer implements Serializer {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** */
-    private static final int DFLT_BUFFER_SIZE = 4096;
-
-    /** Thread local instance of {@link Kryo} */
-    private transient ThreadLocal<Kryo> kryos = new ThreadLocal<Kryo>() {
-        /** {@inheritDoc} */
-        @Override protected Kryo initialValue() {
-            return new Kryo();
-        }
-    };
-
-    /** {@inheritDoc} */
-    @Override public ByteBuffer serialize(Object obj) {
-        if (obj == null)
-            return null;
-
-        ByteArrayOutputStream stream = null;
-
-        Output out = null;
-
-        try {
-            stream = new ByteArrayOutputStream(DFLT_BUFFER_SIZE);
-
-            out = new Output(stream);
-
-            kryos.get().writeClassAndObject(out, obj);
-            out.flush();
-
-            return ByteBuffer.wrap(stream.toByteArray());
-        }
-        catch (Throwable e) {
-            throw new IllegalStateException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
-        }
-        finally {
-            U.closeQuiet(out);
-            U.closeQuiet(stream);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object deserialize(ByteBuffer buf) {
-        ByteArrayInputStream stream = null;
-        Input in = null;
-
-        try {
-            stream = new ByteArrayInputStream(buf.array());
-            in = new Input(stream);
-
-            return kryos.get().readClassAndObject(in);
-        }
-        catch (Throwable e) {
-            throw new IllegalStateException("Failed to deserialize object from byte stream", e);
-        }
-        finally {
-            U.closeQuiet(in);
-            U.closeQuiet(stream);
-        }
-    }
-}
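
For reference, the removed KryoSerializer converts arbitrary object graphs to and from a ByteBuffer using a thread-local Kryo instance, so a single serializer can safely be shared across threads. A minimal usage sketch (not part of this patch), assuming the class is still available on the classpath after the move to the extensions repository and using a plain String payload for illustration:

import java.nio.ByteBuffer;
import org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer;

public class KryoSerializerUsage {
    public static void main(String[] args) {
        KryoSerializer ser = new KryoSerializer();

        // serialize() returns null for null input; otherwise the Kryo-encoded bytes wrapped in a ByteBuffer.
        ByteBuffer buf = ser.serialize("sample value");

        // deserialize() restores the class and state written by writeClassAndObject().
        Object restored = ser.deserialize(buf);

        System.out.println(restored); // prints "sample value"
    }
}

The cyclic-structure test below exercises the same serializer with mutually referencing POJOs.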
diff --git a/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java b/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
deleted file mode 100644
index 838fbbf981d37..0000000000000
--- a/modules/cassandra/serializers/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains Cassandra serializers.
- */
-
-package org.apache.ignite.cache.store.cassandra.serializer;
diff --git a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java
deleted file mode 100644
index b840363953877..0000000000000
--- a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/KryoSerializerTest.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.nio.ByteBuffer;
-import java.util.Date;
-import org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Simple test for KryoSerializer.
- */
-public class KryoSerializerTest {
-    /**
-     * Tests serialization of a simple object.
-     */
-    @Test
-    public void simpleTest() {
-        MyPojo pojo1 = new MyPojo("123", 1, 123423453467L, new Date(), null);
-
-        KryoSerializer ser = new KryoSerializer();
-
-        ByteBuffer buff = ser.serialize(pojo1);
-        MyPojo pojo2 = (MyPojo)ser.deserialize(buff);
-
-        assertEquals("Kryo simple serialization test failed", pojo1, pojo2);
-    }
-
-    /**
-     * Tests serialization of an object graph with cyclic references.
-     */
-    @Test
-    public void cyclicStructureTest() {
-        MyPojo pojo1 = new MyPojo("123", 1, 123423453467L, new Date(), null);
-        MyPojo pojo2 = new MyPojo("321", 2, 123456L, new Date(), pojo1);
-        pojo1.setRef(pojo2);
-
-        KryoSerializer ser = new KryoSerializer();
-
-        ByteBuffer buff1 = ser.serialize(pojo1);
-        ByteBuffer buff2 = ser.serialize(pojo2);
-
-        MyPojo pojo3 = (MyPojo)ser.deserialize(buff1);
-        MyPojo pojo4 = (MyPojo)ser.deserialize(buff2);
-
-        assertEquals("Kryo cyclic structure serialization test failed", pojo1, pojo3);
-        assertEquals("Kryo cyclic structure serialization test failed", pojo1.getRef(), pojo3.getRef());
-        assertEquals("Kryo cyclic structure serialization test failed", pojo2, pojo4);
-        assertEquals("Kryo cyclic structure serialization test failed", pojo2.getRef(), pojo4.getRef());
-    }
-}
diff --git a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java
deleted file mode 100644
index ea6a93232faf4..0000000000000
--- a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/MyPojo.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.io.Serializable;
-import java.util.Date;
-
-/**
- * Sample POJO for tests.
- */
-public class MyPojo implements Serializable {
-    /** */
-    private String field1;
-
-    /** */
-    private int field2;
-
-    /** */
-    private long field3;
-
-    /** */
-    private Date field4;
-
-    /** */
-    private MyPojo ref;
-
-    /**
-     * Empty constructor.
-     */
-    public MyPojo() {
-        // No-op.
-    }
-
-    /**
-     * Full constructor.
-     *
-     * @param field1 Some value.
-     * @param field2 Some value.
-     * @param field3 Some value.
-     * @param field4 Some value.
-     * @param ref Reference to other pojo.
-     */
-    public MyPojo(String field1, int field2, long field3, Date field4, MyPojo ref) {
-        this.field1 = field1;
-        this.field2 = field2;
-        this.field3 = field3;
-        this.field4 = field4;
-        this.ref = ref;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object obj) {
-        if (!(obj instanceof MyPojo))
-            return false;
-
-        MyPojo myObj = (MyPojo)obj;
-
-        if ((field1 == null && myObj.field1 != null) ||
-            (field1 != null && !field1.equals(myObj.field1)))
-            return false;
-
-        if ((field4 == null && myObj.field4 != null) ||
-            (field4 != null && !field4.equals(myObj.field4)))
-            return false;
-
-        return field2 == myObj.field2 && field3 == myObj.field3;
-    }
-
-    /**
-     * @param ref New reference.
-     */
-    public void setRef(MyPojo ref) {
-        this.ref = ref;
-    }
-
-    /**
-     * @return Reference to some POJO.
-     */
-    public MyPojo getRef() {
-        return ref;
-    }
-}
diff --git a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/SerializerSuite.java b/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/SerializerSuite.java
deleted file mode 100644
index b6f5e0782d33f..0000000000000
--- a/modules/cassandra/serializers/src/test/java/org/apache/ignite/tests/SerializerSuite.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-
-/**
- * Suite contains serializers tests.
- */
-@RunWith(Suite.class)
-@Suite.SuiteClasses(KryoSerializerTest.class)
-public class SerializerSuite {
-}
diff --git a/modules/cassandra/store/README.txt b/modules/cassandra/store/README.txt
deleted file mode 100644
index fd72dea4f20d9..0000000000000
--- a/modules/cassandra/store/README.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Apache Ignite Cassandra Store Module
-------------------------
-
-Apache Ignite Cassandra Store module provides a CacheStore implementation backed by the Cassandra database.
-
-To enable the Cassandra Store module when starting a standalone node, move the 'optional/ignite-cassandra-store'
-folder to the 'libs' folder before running the 'ignite.{sh|bat}' script. The content of the module folder will
-then be added to the classpath.
-
-Importing Cassandra Store Module In Maven Project
--------------------------------------
-
-If you are using Maven to manage the dependencies of your project, you can add the Cassandra Store module
-dependency like this (replace '${ignite.version}' with the actual Ignite version you are
-interested in):
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-                        http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    ...
-    <dependencies>
-        ...
-        <dependency>
-            <groupId>org.apache.ignite</groupId>
-            <artifactId>ignite-cassandra-store</artifactId>
-            <version>${ignite.version}</version>
-        </dependency>
-        ...
-    </dependencies>
-    ...
-</project>
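
Beyond the Maven dependency, the store is normally attached to a cache through CassandraCacheStoreFactory (removed further down in this patch). A minimal configuration sketch, not taken from this patch: the contact point, cache name and 'persistence-settings.xml' resource are illustrative placeholders, and the setters shown are assumed from the factory's and DataSource's public API.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory;
import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.springframework.core.io.ClassPathResource;

public class CassandraStoreConfigSketch {
    public static void main(String[] args) {
        // Cassandra connection settings (contact points, credentials, etc.).
        DataSource dataSrc = new DataSource();
        dataSrc.setContactPoints("127.0.0.1");

        // Mapping of Ignite keys/values to Cassandra columns; the resource name is illustrative.
        KeyValuePersistenceSettings settings =
            new KeyValuePersistenceSettings(new ClassPathResource("persistence-settings.xml"));

        CassandraCacheStoreFactory<Long, String> storeFactory = new CassandraCacheStoreFactory<>();
        storeFactory.setDataSource(dataSrc);
        storeFactory.setPersistenceSettings(settings);

        CacheConfiguration<Long, String> ccfg = new CacheConfiguration<>("myCache");
        ccfg.setCacheStoreFactory(storeFactory);
        ccfg.setReadThrough(true);
        ccfg.setWriteThrough(true);

        Ignite ignite = Ignition.start(new IgniteConfiguration().setCacheConfiguration(ccfg));
    }
}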
diff --git a/modules/cassandra/store/licenses/apache-2.0.txt b/modules/cassandra/store/licenses/apache-2.0.txt
deleted file mode 100644
index d645695673349..0000000000000
--- a/modules/cassandra/store/licenses/apache-2.0.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/modules/cassandra/store/pom.xml b/modules/cassandra/store/pom.xml
deleted file mode 100644
index d3a7f1254d00a..0000000000000
--- a/modules/cassandra/store/pom.xml
+++ /dev/null
@@ -1,333 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    POM file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.ignite</groupId>
-        <artifactId>ignite-cassandra</artifactId>
-        <version>${revision}</version>
-        <relativePath>../pom.xml</relativePath>
-    </parent>
-
-    <artifactId>ignite-cassandra-store</artifactId>
-
-    <url>http://ignite.apache.org</url>
-
-    <properties>
-        <commons-beanutils.version>1.9.4</commons-beanutils.version>
-        <cassandra-driver.version>3.2.0</cassandra-driver.version>
-        <cassandra-all.version>3.11.3</cassandra-all.version>
-        <metrics-core.version>3.0.2</metrics-core.version>
-        <dataostaxguava.version>25.1-jre</dataostaxguava.version>
-    </properties>
-
-    <dependencies>
-        <!-- Apache commons -->
-        <dependency>
-            <groupId>commons-beanutils</groupId>
-            <artifactId>commons-beanutils</artifactId>
-            <version>${commons-beanutils.version}</version>
-        </dependency>
-
-        <!-- Ignite -->
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-core</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-spring</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-log4j2</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-tools</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-        <!-- Cassandra and required dependencies -->
-        <dependency>
-            <groupId>com.datastax.cassandra</groupId>
-            <artifactId>cassandra-driver-core</artifactId>
-            <version>${cassandra-driver.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-handler</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-buffer</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-common</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-transport</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-codec</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty-resolver</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.datastax.oss</groupId>
-            <artifactId>java-driver-shaded-guava</artifactId>
-            <version>${dataostaxguava.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.codahale.metrics</groupId>
-            <artifactId>metrics-core</artifactId>
-            <version>${metrics-core.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <version>${slf4j.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.cassandra</groupId>
-            <artifactId>cassandra-all</artifactId>
-            <version>${cassandra-all.version}</version>
-            <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <artifactId>log4j-over-slf4j</artifactId>
-                    <groupId>org.slf4j</groupId>
-                </exclusion>
-                <exclusion>
-                    <groupId>commons-codec</groupId>
-                    <artifactId>commons-codec</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-
-        <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <scope>test</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>ignite-core</artifactId>
-            <type>test-jar</type>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.7.0</version>
-                <configuration>
-                    <encoding>UTF-8</encoding>
-                    <fork>true</fork>
-                    <debug>false</debug>
-                    <debuglevel>lines,vars,source</debuglevel>
-                    <meminitial>256</meminitial>
-                    <maxmem>512</maxmem>
-                </configuration>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-dependency-plugin</artifactId>
-                <version>2.10</version>
-                <executions>
-                    <execution>
-                        <id>copy-all-dependencies</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>copy-dependencies</goal>
-                        </goals>
-                        <configuration>
-                            <outputDirectory>${project.build.directory}/tests-package/lib</outputDirectory>
-                            <overWriteReleases>false</overWriteReleases>
-                            <overWriteSnapshots>false</overWriteSnapshots>
-                            <overWriteIfNewer>true</overWriteIfNewer>
-                            <excludeArtifactIds>
-                                netty-all,cassandra-all,snappy-java,lz4,compress-lzf,commons-codec,commons-lang3,commons-math3,
-                                concurrentlinkedhashmap-lru,antlr,ST4,antlr-runtime,jcl-over-slf4j,jackson-core-asl,
-                                jackson-mapper-asl,json-simple,high-scale-lib,snakeyaml,jbcrypt,reporter-config3,
-                                reporter-config-base,hibernate-validator,validation-api,jboss-logging,thrift-server,
-                                disruptor,stream,fastutil,logback-core,logback-classic,libthrift,httpclient,httpcore,
-                                cassandra-thrift,jna,jamm,joda-time,sigar,ecj,tools
-                            </excludeArtifactIds>
-                        </configuration>
-                    </execution>
-<!-- -->
-                    <execution>
-                        <id>copy-main-dependencies</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>copy-dependencies</goal>
-                        </goals>
-                        <configuration>
-                            <outputDirectory>${project.build.directory}/libs</outputDirectory>
-                            <overWriteReleases>false</overWriteReleases>
-                            <overWriteSnapshots>false</overWriteSnapshots>
-                            <overWriteIfNewer>true</overWriteIfNewer>
-                            <excludeTransitive>true</excludeTransitive>
-                            <excludeGroupIds>
-                                ${project.groupId},org.springframework,org.gridgain
-                            </excludeGroupIds>
-                            <excludeArtifactIds>
-                                commons-logging,slf4j-api,cache-api,slf4j-api,aopalliance
-                            </excludeArtifactIds>
-                            <includeScope>runtime</includeScope>
-                        </configuration>
-                    </execution>
-<!-- -->
-                </executions>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-antrun-plugin</artifactId>
-
-                <dependencies>
-                    <dependency>
-                        <groupId>ant-contrib</groupId>
-                        <artifactId>ant-contrib</artifactId>
-                        <version>1.0b3</version>
-                        <exclusions>
-                            <exclusion>
-                                <groupId>ant</groupId>
-                                <artifactId>ant</artifactId>
-                            </exclusion>
-                        </exclusions>
-                    </dependency>
-                </dependencies>
-                <executions>
-                    <execution>
-                        <id>package-tests</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>run</goal>
-                        </goals>
-                        <configuration>
-                            <target>
-                                <taskdef resource="net/sf/antcontrib/antlib.xml" />
-                                <if>
-                                    <available file="${project.build.directory}/test-classes" type="dir" />
-                                    <then>
-                                        <copy todir="${project.build.directory}/tests-package/lib">
-                                            <fileset dir="${project.build.directory}">
-                                                <include name="*.jar" />
-                                            </fileset>
-                                        </copy>
-
-                                        <jar destfile="${project.build.directory}/tests-package/lib/${project.artifactId}-${project.version}-tests.jar">
-                                            <fileset dir="${project.build.directory}/test-classes">
-                                                <include name="**/*.class" />
-                                            </fileset>
-                                        </jar>
-
-                                        <copy todir="${project.build.directory}/tests-package/settings">
-                                            <fileset dir="${project.build.directory}/test-classes">
-                                                <include name="**/*.properties" />
-                                                <include name="**/*.xml" />
-                                            </fileset>
-                                        </copy>
-
-                                        <copy todir="${project.build.directory}/tests-package">
-                                            <fileset dir="${project.build.testSourceDirectory}/../scripts">
-                                                <include name="**/*" />
-                                            </fileset>
-                                        </copy>
-
-                                        <fixcrlf srcdir="${project.build.directory}/tests-package" eol="lf" eof="remove">
-                                            <include name="*.sh" />
-                                        </fixcrlf>
-
-                                        <copy todir="${project.build.directory}/tests-package">
-                                            <fileset dir="${project.build.testSourceDirectory}/..">
-                                                <include name="bootstrap/**" />
-                                            </fileset>
-                                        </copy>
-
-                                        <fixcrlf srcdir="${project.build.directory}/tests-package/bootstrap" eol="lf" eof="remove">
-                                            <include name="**" />
-                                        </fixcrlf>
-
-                                        <zip destfile="${project.build.directory}/ignite-cassandra-tests-${project.version}.zip" compress="true" whenempty="create" level="9" encoding="UTF-8" useLanguageEncodingFlag="true" createUnicodeExtraFields="not-encodeable">
-
-                                            <zipfileset dir="${project.build.directory}/tests-package" prefix="ignite-cassandra-tests">
-                                                <exclude name="**/*.sh" />
-                                            </zipfileset>
-
-                                            <zipfileset dir="${project.build.directory}/tests-package" prefix="ignite-cassandra-tests" filemode="555">
-                                                <include name="**/*.sh" />
-                                            </zipfileset>
-                                        </zip>
-                                    </then>
-                                </if>
-                            </target>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-deploy-plugin</artifactId>
-                <version>2.8.2</version>
-                <configuration>
-                    <skip>false</skip>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
deleted file mode 100644
index 03abc4e300ce1..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStore.java
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import javax.cache.Cache;
-import javax.cache.integration.CacheLoaderException;
-import javax.cache.integration.CacheWriterException;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Statement;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.cache.store.CacheStoreSession;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
-import org.apache.ignite.cache.store.cassandra.session.CassandraSession;
-import org.apache.ignite.cache.store.cassandra.session.ExecutionAssistant;
-import org.apache.ignite.cache.store.cassandra.session.GenericBatchExecutionAssistant;
-import org.apache.ignite.cache.store.cassandra.session.LoadCacheCustomQueryWorker;
-import org.apache.ignite.cache.store.cassandra.session.transaction.DeleteMutation;
-import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;
-import org.apache.ignite.cache.store.cassandra.session.transaction.WriteMutation;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiInClosure;
-import org.apache.ignite.logger.NullLogger;
-import org.apache.ignite.resources.CacheStoreSessionResource;
-import org.apache.ignite.resources.IgniteInstanceResource;
-import org.apache.ignite.resources.LoggerResource;
-import org.apache.ignite.thread.IgniteThreadFactory;
-
-/**
- * Implementation of {@link CacheStore} backed by the Cassandra database.
- *
- * @param <K> Ignite cache key type.
- * @param <V> Ignite cache value type.
- */
-public class CassandraCacheStore<K, V> implements CacheStore<K, V> {
-    /** Buffer to store mutations performed within a transaction. */
-    private static final String TRANSACTION_BUFFER = "CASSANDRA_TRANSACTION_BUFFER";
-
-    /** Thread name. */
-    private static final String CACHE_LOADER_THREAD_NAME = "cassandra-cache-loader";
-
-    /** Auto-injected ignite instance. */
-    @SuppressWarnings("unused")
-    @IgniteInstanceResource
-    private Ignite ignite;
-
-    /** Auto-injected store session. */
-    @SuppressWarnings("unused")
-    @CacheStoreSessionResource
-    private CacheStoreSession storeSes;
-
-    /** Auto-injected logger instance. */
-    @SuppressWarnings("unused")
-    @LoggerResource
-    private IgniteLogger log;
-
-    /** Cassandra data source. */
-    private DataSource dataSrc;
-
-    /** Maximum number of worker threads. These threads are responsible for loading the cache. */
-    private int maxPoolSize = Runtime.getRuntime().availableProcessors();
-
-    /** Controller component responsible for serialization logic. */
-    private final PersistenceController controller;
-
-    /**
-     * Store constructor.
-     *
-     * @param dataSrc Data source.
-     * @param settings Persistence settings for Ignite key and value objects.
-     * @param maxPoolSize Maximum number of worker threads.
-     */
-    public CassandraCacheStore(DataSource dataSrc, KeyValuePersistenceSettings settings, int maxPoolSize) {
-        this.dataSrc = dataSrc;
-        this.controller = new PersistenceController(settings);
-        this.maxPoolSize = maxPoolSize;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void loadCache(IgniteBiInClosure<K, V> clo, Object... args) throws CacheLoaderException {
-        if (clo == null)
-            return;
-
-        if (args == null || args.length == 0)
-            args = new String[] {"select * from " + controller.getPersistenceSettings().getKeyspace() + "." + cassandraTable() + ";"};
-
-        ExecutorService pool = null;
-
-        Collection<Future<?>> futs = new ArrayList<>(args.length);
-
-        try {
-            pool = Executors.newFixedThreadPool(maxPoolSize, new IgniteThreadFactory(ignite.name(), CACHE_LOADER_THREAD_NAME));
-
-            CassandraSession ses = getCassandraSession();
-
-            for (Object obj : args) {
-                LoadCacheCustomQueryWorker<K, V> task = null;
-
-                if (obj instanceof Statement)
-                    task = new LoadCacheCustomQueryWorker<>(ses, (Statement)obj, controller, log, clo);
-                else if (obj instanceof String) {
-                    String qry = ((String)obj).trim();
-
-                    if (qry.toLowerCase().startsWith("select"))
-                        task = new LoadCacheCustomQueryWorker<>(ses, (String)obj, controller, log, clo);
-                }
-
-                if (task != null)
-                    futs.add(pool.submit(task));
-            }
-
-            for (Future<?> fut : futs)
-                U.get(fut);
-
-            if (log != null && log.isDebugEnabled() && storeSes != null)
-                log.debug("Cache loaded from db: " + storeSes.cacheName());
-        }
-        catch (IgniteCheckedException e) {
-            if (storeSes != null)
-                throw new CacheLoaderException("Failed to load Ignite cache: " + storeSes.cacheName(), e.getCause());
-            else
-                throw new CacheLoaderException("Failed to load cache", e.getCause());
-        }
-        finally {
-            U.shutdownNow(getClass(), pool, log);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void sessionEnd(boolean commit) throws CacheWriterException {
-        if (!storeSes.isWithinTransaction())
-            return;
-
-        List<Mutation> mutations = mutations();
-        if (mutations == null || mutations.isEmpty())
-            return;
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            ses.execute(mutations);
-        }
-        finally {
-            mutations.clear();
-            U.closeQuiet(ses);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings({"unchecked"})
-    @Override public V load(final K key) throws CacheLoaderException {
-        if (key == null)
-            return null;
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            return ses.execute(new ExecutionAssistant<V>() {
-                /** {@inheritDoc} */
-                @Override public boolean tableExistenceRequired() {
-                    return false;
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getTable() {
-                    return cassandraTable();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getStatement() {
-                    return controller.getLoadStatement(cassandraTable(), false);
-                }
-
-                /** {@inheritDoc} */
-                @Override public BoundStatement bindStatement(PreparedStatement statement) {
-                    return controller.bindKey(statement, key);
-                }
-
-                /** {@inheritDoc} */
-                @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-                    return controller.getPersistenceSettings();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String operationName() {
-                    return "READ";
-                }
-
-                /** {@inheritDoc} */
-                @Override public V process(Row row) {
-                    return row == null ? null : (V)controller.buildValueObject(row);
-                }
-            });
-        }
-        finally {
-            U.closeQuiet(ses);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public Map<K, V> loadAll(Iterable<? extends K> keys) throws CacheLoaderException {
-        if (keys == null || !keys.iterator().hasNext())
-            return new HashMap<>();
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            return ses.execute(new GenericBatchExecutionAssistant<Map<K, V>, K>() {
-                private Map<K, V> data = new HashMap<>();
-
-                /** {@inheritDoc} */
-                @Override public String getTable() {
-                    return cassandraTable();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getStatement() {
-                    return controller.getLoadStatement(cassandraTable(), true);
-                }
-
-                /** {@inheritDoc} */
-                @Override public BoundStatement bindStatement(PreparedStatement statement, K key) {
-                    return controller.bindKey(statement, key);
-                }
-
-                /** {@inheritDoc} */
-                @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-                    return controller.getPersistenceSettings();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String operationName() {
-                    return "BULK_READ";
-                }
-
-                /** {@inheritDoc} */
-                @Override public Map<K, V> processedData() {
-                    return data;
-                }
-
-                /** {@inheritDoc} */
-                @Override protected void process(Row row) {
-                    if (row != null)
-                        data.put((K)controller.buildKeyObject(row), (V)controller.buildValueObject(row));
-                }
-            }, keys);
-        }
-        finally {
-            U.closeQuiet(ses);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void write(final Cache.Entry<? extends K, ? extends V> entry) throws CacheWriterException {
-        if (entry == null || entry.getKey() == null)
-            return;
-
-        if (storeSes.isWithinTransaction()) {
-            accumulate(new WriteMutation(entry, cassandraTable(), controller));
-            return;
-        }
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            ses.execute(new ExecutionAssistant<Void>() {
-                /** {@inheritDoc} */
-                @Override public boolean tableExistenceRequired() {
-                    return true;
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getTable() {
-                    return cassandraTable();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getStatement() {
-                    return controller.getWriteStatement(cassandraTable());
-                }
-
-                /** {@inheritDoc} */
-                @Override public BoundStatement bindStatement(PreparedStatement statement) {
-                    return controller.bindKeyValue(statement, entry.getKey(), entry.getValue());
-                }
-
-                /** {@inheritDoc} */
-                @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-                    return controller.getPersistenceSettings();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String operationName() {
-                    return "WRITE";
-                }
-
-                /** {@inheritDoc} */
-                @Override public Void process(Row row) {
-                    return null;
-                }
-            });
-        }
-        finally {
-            U.closeQuiet(ses);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeAll(Collection<Cache.Entry<? extends K, ? extends V>> entries) throws CacheWriterException {
-        if (entries == null || entries.isEmpty())
-            return;
-
-        if (storeSes.isWithinTransaction()) {
-            for (Cache.Entry<?, ?> entry : entries)
-                accumulate(new WriteMutation(entry, cassandraTable(), controller));
-
-            return;
-        }
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            ses.execute(new GenericBatchExecutionAssistant<Void, Cache.Entry<? extends K, ? extends V>>() {
-                /** {@inheritDoc} */
-                @Override public String getTable() {
-                    return cassandraTable();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getStatement() {
-                    return controller.getWriteStatement(cassandraTable());
-                }
-
-                /** {@inheritDoc} */
-                @Override public BoundStatement bindStatement(PreparedStatement statement,
-                    Cache.Entry<? extends K, ? extends V> entry) {
-                    return controller.bindKeyValue(statement, entry.getKey(), entry.getValue());
-                }
-
-                /** {@inheritDoc} */
-                @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-                    return controller.getPersistenceSettings();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String operationName() {
-                    return "BULK_WRITE";
-                }
-
-                /** {@inheritDoc} */
-                @Override public boolean tableExistenceRequired() {
-                    return true;
-                }
-            }, entries);
-        }
-        finally {
-            U.closeQuiet(ses);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void delete(final Object key) throws CacheWriterException {
-        if (key == null)
-            return;
-
-        if (storeSes.isWithinTransaction()) {
-            accumulate(new DeleteMutation(key, cassandraTable(), controller));
-            return;
-        }
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            ses.execute(new ExecutionAssistant<Void>() {
-                /** {@inheritDoc} */
-                @Override public boolean tableExistenceRequired() {
-                    return false;
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getTable() {
-                    return cassandraTable();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getStatement() {
-                    return controller.getDeleteStatement(cassandraTable());
-                }
-
-                /** {@inheritDoc} */
-                @Override public BoundStatement bindStatement(PreparedStatement statement) {
-                    return controller.bindKey(statement, key);
-                }
-
-                /** {@inheritDoc} */
-                @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-                    return controller.getPersistenceSettings();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String operationName() {
-                    return "DELETE";
-                }
-
-                /** {@inheritDoc} */
-                @Override public Void process(Row row) {
-                    return null;
-                }
-            });
-        }
-        finally {
-            U.closeQuiet(ses);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void deleteAll(Collection<?> keys) throws CacheWriterException {
-        if (keys == null || keys.isEmpty())
-            return;
-
-        if (storeSes.isWithinTransaction()) {
-            for (Object key : keys)
-                accumulate(new DeleteMutation(key, cassandraTable(), controller));
-
-            return;
-        }
-
-        CassandraSession ses = getCassandraSession();
-
-        try {
-            ses.execute(new GenericBatchExecutionAssistant<Void, Object>() {
-                /** {@inheritDoc} */
-                @Override public String getTable() {
-                    return cassandraTable();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String getStatement() {
-                    return controller.getDeleteStatement(cassandraTable());
-                }
-
-                /** {@inheritDoc} */
-                @Override public BoundStatement bindStatement(PreparedStatement statement, Object key) {
-                    return controller.bindKey(statement, key);
-                }
-
-                /** {@inheritDoc} */
-                @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-                    return controller.getPersistenceSettings();
-                }
-
-                /** {@inheritDoc} */
-                @Override public String operationName() {
-                    return "BULK_DELETE";
-                }
-            }, keys);
-        }
-        finally {
-            U.closeQuiet(ses);
-        }
-    }
-
-    /**
-     * Gets the Cassandra session wrapper or creates a new one if it doesn't exist.
-     * This wrapper hides all the low-level Cassandra interaction details by providing only high-level methods.
-     *
-     * @return Cassandra session wrapper.
-     */
-    private CassandraSession getCassandraSession() {
-        return dataSrc.session(log != null ? log : new NullLogger());
-    }
-
-    /**
-     * Returns the table name to use for all Cassandra-based operations (READ/WRITE/DELETE).
-     *
-     * @return Table name.
-     */
-    private String cassandraTable() {
-        return controller.getPersistenceSettings().getTable() != null ?
-            controller.getPersistenceSettings().getTable() : storeSes.cacheName().trim().toLowerCase();
-    }
-
-    /**
-     * Accumulates a mutation in the transaction buffer.
-     *
-     * @param mutation Mutation operation.
-     */
-    private void accumulate(Mutation mutation) {
-        //noinspection unchecked
-        List<Mutation> mutations = (List<Mutation>)storeSes.properties().get(TRANSACTION_BUFFER);
-
-        if (mutations == null) {
-            mutations = new LinkedList<>();
-            storeSes.properties().put(TRANSACTION_BUFFER, mutations);
-        }
-
-        mutations.add(mutation);
-    }
-
-    /**
-     * Returns all the mutations performed within the transaction.
-     *
-     * @return Mutations.
-     */
-    private List<Mutation> mutations() {
-        //noinspection unchecked
-        return (List<Mutation>)storeSes.properties().get(TRANSACTION_BUFFER);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(CassandraCacheStore.class, this);
-    }
-}
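
For reference, a minimal, self-contained sketch of the transaction-buffer pattern used above
(mutations accumulated in the cache store session properties and applied as one batch on commit);
the TX_BUFFER key name and the Mutation stub are illustrative stand-ins rather than the module's
exact API:

    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;

    public class TxBufferSketch {
        /** Assumed key under which the per-transaction buffer is stored. */
        private static final String TX_BUFFER = "CASSANDRA_MUTATIONS";

        /** Stand-in for a single buffered Cassandra write or delete. */
        interface Mutation {
            void apply();
        }

        /** Appends a mutation to the buffer kept in the shared session properties map. */
        @SuppressWarnings("unchecked")
        static void accumulate(Map<Object, Object> sesProps, Mutation mutation) {
            List<Mutation> buf = (List<Mutation>)sesProps.get(TX_BUFFER);

            if (buf == null) {
                buf = new LinkedList<>();
                sesProps.put(TX_BUFFER, buf);
            }

            buf.add(mutation);
        }

        /** Applies and clears the buffered mutations; intended to run once, on commit. */
        @SuppressWarnings("unchecked")
        static void flush(Map<Object, Object> sesProps) {
            List<Mutation> buf = (List<Mutation>)sesProps.remove(TX_BUFFER);

            if (buf != null)
                buf.forEach(Mutation::apply);
        }
    }
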
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
deleted file mode 100644
index d170949e63807..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/CassandraCacheStoreFactory.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra;
-
-import javax.cache.configuration.Factory;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.internal.IgniteComponentType;
-import org.apache.ignite.internal.util.spring.IgniteSpringHelper;
-import org.apache.ignite.resources.SpringApplicationContextResource;
-
-/**
- * Factory class to instantiate {@link CassandraCacheStore}.
- *
- * @param <K> Ignite cache key type
- * @param <V> Ignite cache value type
- */
-public class CassandraCacheStoreFactory<K, V> implements Factory<CassandraCacheStore<K, V>> {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** Auto-injected Spring ApplicationContext resource. */
-    @SpringApplicationContextResource
-    private Object appCtx;
-
-    /** Name of data source bean. */
-    private String dataSrcBean;
-
-    /** Name of persistence settings bean. */
-    private String persistenceSettingsBean;
-
-    /** Data source. */
-    private DataSource dataSrc;
-
-    /** Persistence settings. */
-    private KeyValuePersistenceSettings persistenceSettings;
-
-    /** Maximum worker threads count. These threads are responsible for cache loading. */
-    private int maxPoolSize = Runtime.getRuntime().availableProcessors();
-
-    /** {@inheritDoc} */
-    @Override public CassandraCacheStore<K, V> create() {
-        return new CassandraCacheStore<>(getDataSource(), getPersistenceSettings(), getMaxPoolSize());
-    }
-
-    /**
-     * Sets data source.
-     *
-     * @param dataSrc Data source.
-     *
-     * @return {@code This} for chaining.
-     */
-    public CassandraCacheStoreFactory<K, V> setDataSource(DataSource dataSrc) {
-        this.dataSrc = dataSrc;
-
-        return this;
-    }
-
-    /**
-     * Sets data source bean name.
-     *
-     * @param beanName Data source bean name.
-     * @return {@code This} for chaining.
-     */
-    public CassandraCacheStoreFactory<K, V> setDataSourceBean(String beanName) {
-        this.dataSrcBean = beanName;
-
-        return this;
-    }
-
-    /**
-     * Sets persistence settings.
-     *
-     * @param settings Persistence settings.
-     * @return {@code This} for chaining.
-     */
-    public CassandraCacheStoreFactory<K, V> setPersistenceSettings(KeyValuePersistenceSettings settings) {
-        this.persistenceSettings = settings;
-
-        return this;
-    }
-
-    /**
-     * Sets persistence settings bean name.
-     *
-     * @param beanName Persistence settings bean name.
-     * @return {@code This} for chaining.
-     */
-    public CassandraCacheStoreFactory<K, V> setPersistenceSettingsBean(String beanName) {
-        this.persistenceSettingsBean = beanName;
-
-        return this;
-    }
-
-    /**
-     * @return Data source.
-     */
-    private DataSource getDataSource() {
-        if (dataSrc != null)
-            return dataSrc;
-
-        if (dataSrcBean == null)
-            throw new IllegalStateException("Either DataSource bean or DataSource itself should be specified");
-
-        if (appCtx == null) {
-            throw new IllegalStateException("Failed to get Cassandra DataSource cause Spring application " +
-                "context wasn't injected into CassandraCacheStoreFactory");
-        }
-
-        Object obj = loadSpringContextBean(appCtx, dataSrcBean);
-
-        if (!(obj instanceof DataSource))
-            throw new IllegalStateException("Incorrect connection bean '" + dataSrcBean + "' specified");
-
-        return dataSrc = (DataSource)obj;
-    }
-
-    /**
-     * @return Persistence settings.
-     */
-    private KeyValuePersistenceSettings getPersistenceSettings() {
-        if (persistenceSettings != null)
-            return persistenceSettings;
-
-        if (persistenceSettingsBean == null) {
-            throw new IllegalStateException("Either persistence settings bean or persistence settings itself " +
-                "should be specified");
-        }
-
-        if (appCtx == null) {
-            throw new IllegalStateException("Failed to get Cassandra persistence settings cause Spring application " +
-                "context wasn't injected into CassandraCacheStoreFactory");
-        }
-
-        Object obj = loadSpringContextBean(appCtx, persistenceSettingsBean);
-
-        if (!(obj instanceof KeyValuePersistenceSettings)) {
-            throw new IllegalStateException("Incorrect persistence settings bean '" +
-                persistenceSettingsBean + "' specified");
-        }
-
-        return persistenceSettings = (KeyValuePersistenceSettings)obj;
-    }
-
-    /**
-     * Gets the maximum worker threads count. These threads are responsible for query execution.
-     *
-     * @return Maximum workers thread count.
-     */
-    public int getMaxPoolSize() {
-        return maxPoolSize;
-    }
-
-    /**
-     * Sets the maximum worker threads count. These threads are responsible for query execution.
-     *
-     * @param maxPoolSize Max workers thread count.
-     * @return {@code This} for chaining.
-     */
-    public CassandraCacheStoreFactory<K, V> setMaxPoolSize(int maxPoolSize) {
-        this.maxPoolSize = maxPoolSize;
-
-        return this;
-    }
-
-    /**
-     * Loads bean from Spring ApplicationContext.
-     *
-     * @param appCtx Application context.
-     * @param beanName Bean name to load.
-     * @return Loaded bean.
-     */
-    private Object loadSpringContextBean(Object appCtx, String beanName) {
-        try {
-            IgniteSpringHelper spring = IgniteComponentType.SPRING.create(false);
-            return spring.loadBeanFromAppContext(appCtx, beanName);
-        }
-        catch (Exception e) {
-            throw new IgniteException(
-                "Failed to load bean in application context [beanName=" + beanName + ", igniteConfig=" + appCtx + ']',
-                e
-            );
-        }
-    }
-}
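
For reference, a hypothetical wiring sketch showing how CassandraCacheStoreFactory could be plugged
into an Ignite cache configuration; the cache name and Spring bean names are placeholders, and the
referenced beans are assumed to exist in the injected application context:

    import org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory;
    import org.apache.ignite.configuration.CacheConfiguration;

    public class CacheStoreWiringSketch {
        public static CacheConfiguration<Long, String> cacheCfg() {
            CassandraCacheStoreFactory<Long, String> factory = new CassandraCacheStoreFactory<Long, String>()
                .setDataSourceBean("cassandraDataSource")          // placeholder bean name
                .setPersistenceSettingsBean("persistenceSettings") // placeholder bean name
                .setMaxPoolSize(Runtime.getRuntime().availableProcessors());

            CacheConfiguration<Long, String> ccfg = new CacheConfiguration<>("myCache");

            // Read-through/write-through so cache misses and updates go through the store.
            ccfg.setCacheStoreFactory(factory);
            ccfg.setReadThrough(true);
            ccfg.setWriteThrough(true);

            return ccfg;
        }
    }
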
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
deleted file mode 100644
index 139a97d030c9a..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/CassandraHelper.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.common;
-
-import java.net.InetSocketAddress;
-import java.util.Map;
-import java.util.regex.Pattern;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.DataType;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.exceptions.DriverException;
-import com.datastax.driver.core.exceptions.InvalidQueryException;
-import com.datastax.driver.core.exceptions.NoHostAvailableException;
-import com.datastax.driver.core.exceptions.ReadTimeoutException;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Helper class providing methods to work with the Cassandra session and exceptions.
- */
-public class CassandraHelper {
-    /** Cassandra error message if specified keyspace doesn't exist. */
-    private static final Pattern KEYSPACE_EXIST_ERROR1 = Pattern.compile("Keyspace [0-9a-zA-Z_]+ does not exist");
-
-    /** Cassandra error message if trying to create table inside nonexistent keyspace. */
-    private static final Pattern KEYSPACE_EXIST_ERROR2 = Pattern.compile("Cannot add table '[0-9a-zA-Z_]+' to non existing keyspace.*");
-
-    /** Cassandra error message if the specified keyspace doesn't exist (reported during query preparation). */
-    private static final Pattern KEYSPACE_EXIST_ERROR3 = Pattern.compile("Error preparing query, got ERROR INVALID: " +
-            "Keyspace [0-9a-zA-Z_]+ does not exist");
-
-    /** Cassandra error message if specified table doesn't exist. */
-    private static final Pattern TABLE_EXIST_ERROR1 = Pattern.compile("unconfigured table [0-9a-zA-Z_]+");
-
-    /** Cassandra error message if specified table doesn't exist. */
-    private static final String TABLE_EXIST_ERROR2 = "Error preparing query, got ERROR INVALID: unconfigured table";
-    
-    /** Cassandra error message if specified table doesn't exist. */
-    private static final Pattern TABLE_EXIST_ERROR3 = Pattern.compile("unconfigured columnfamily [0-9a-zA-Z_]+");
-
-    /** Cassandra error message if trying to use prepared statement created from another session. */
-    private static final String PREP_STATEMENT_CLUSTER_INSTANCE_ERROR = "You may have used a PreparedStatement that " +
-        "was created with another Cluster instance";
-
-    /**
-     * Closes Cassandra driver session.
-     *
-     * @param driverSes Session to close.
-     */
-    public static void closeSession(Session driverSes) {
-        if (driverSes == null)
-            return;
-
-        Cluster cluster = driverSes.getCluster();
-
-        if (!driverSes.isClosed())
-            U.closeQuiet(driverSes);
-
-        if (!cluster.isClosed())
-            U.closeQuiet(cluster);
-    }
-
-    /**
-     * Checks if a Cassandra keyspace absence error occurred.
-     *
-     * @param e Exception to check.
-     * @return {@code true} in case of keyspace absence error.
-     */
-    public static boolean isKeyspaceAbsenceError(Throwable e) {
-        while (e != null) {
-            if (e instanceof InvalidQueryException &&
-                (KEYSPACE_EXIST_ERROR1.matcher(e.getMessage()).matches() ||
-                    KEYSPACE_EXIST_ERROR2.matcher(e.getMessage()).matches()))
-                return true;
-
-            e = e.getCause();
-        }
-
-        return false;
-    }
-
-    /**
-     * Checks if a Cassandra table absence error occurred.
-     *
-     * @param e Exception to check.
-     * @return {@code true} in case of table absence error.
-     */
-    public static boolean isTableAbsenceError(Throwable e) {
-        while (e != null) {
-            if (e instanceof InvalidQueryException &&
-                (TABLE_EXIST_ERROR1.matcher(e.getMessage()).matches() ||
-                    TABLE_EXIST_ERROR3.matcher(e.getMessage()).matches() ||
-                    KEYSPACE_EXIST_ERROR1.matcher(e.getMessage()).matches() ||
-                    KEYSPACE_EXIST_ERROR2.matcher(e.getMessage()).matches()))
-                return true;
-
-            if (e instanceof NoHostAvailableException && ((NoHostAvailableException)e).getErrors() != null) {
-                NoHostAvailableException ex = (NoHostAvailableException)e;
-
-                for (Map.Entry<InetSocketAddress, Throwable> entry : ex.getErrors().entrySet()) {
-                    Throwable error = entry.getValue();
-
-                    if (error instanceof DriverException &&
-                        (error.getMessage().contains(TABLE_EXIST_ERROR2) ||
-                             KEYSPACE_EXIST_ERROR3.matcher(error.getMessage()).matches()))
-                        return true;
-                }
-            }
-
-            e = e.getCause();
-        }
-
-        return false;
-    }
-
-    /**
-     * Checks if a Cassandra host availability error occurred, i.e. a host became unavailable.
-     *
-     * @param e Exception to check.
-     * @return {@code true} in case of host not available error.
-     */
-    public static boolean isHostsAvailabilityError(Throwable e) {
-        while (e != null) {
-            if (e instanceof NoHostAvailableException ||
-                e instanceof ReadTimeoutException)
-                return true;
-
-            e = e.getCause();
-        }
-
-        return false;
-    }
-
-    /**
-     * Checks if a Cassandra error occurred because a prepared statement created in one session was used in another session.
-     *
-     * @param e Exception to check.
-     * @return {@code true} in case of invalid usage of prepared statement.
-     */
-    public static boolean isPreparedStatementClusterError(Throwable e) {
-        while (e != null) {
-            if (e instanceof InvalidQueryException && e.getMessage().contains(PREP_STATEMENT_CLUSTER_INSTANCE_ERROR))
-                return true;
-
-            e = e.getCause();
-        }
-
-        return false;
-    }
-
-    /**
-     * Checks if two Java classes are Cassandra compatible - mapped to the same Cassandra type.
-     *
-     * @param type1 First type.
-     * @param type2 Second type.
-     * @return {@code true} if classes are compatible and {@code false} if not.
-     */
-    public static boolean isCassandraCompatibleTypes(Class type1, Class type2) {
-        if (type1 == null || type2 == null)
-            return false;
-
-        DataType.Name t1 = PropertyMappingHelper.getCassandraType(type1);
-        DataType.Name t2 = PropertyMappingHelper.getCassandraType(type2);
-
-        return t1 != null && t2 != null && t1.equals(t2);
-    }
-}
-
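
A hedged sketch of how the error classifiers above are typically consumed by a retry loop; the
Action enum is an illustrative stand-in, since the real session implementation reacts to each
condition inline:

    import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;

    public class ErrorClassificationSketch {
        enum Action { CREATE_TABLE_AND_RETRY, BACKOFF_AND_RETRY, REPREPARE_AND_RETRY, FAIL }

        static Action classify(Throwable e) {
            if (CassandraHelper.isTableAbsenceError(e))
                return Action.CREATE_TABLE_AND_RETRY; // keyspace/table is missing

            if (CassandraHelper.isHostsAvailabilityError(e))
                return Action.BACKOFF_AND_RETRY;      // hosts unreachable or read timed out

            if (CassandraHelper.isPreparedStatementClusterError(e))
                return Action.REPREPARE_AND_RETRY;    // statement prepared on another Cluster

            return Action.FAIL;                       // not a known recoverable condition
        }
    }
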
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
deleted file mode 100644
index 9f6d457a8e210..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/PropertyMappingHelper.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.common;
-
-import java.beans.PropertyDescriptor;
-import java.lang.reflect.Field;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.UUID;
-import com.datastax.driver.core.DataType;
-import com.datastax.driver.core.Row;
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.ignite.cache.store.cassandra.persistence.PojoFieldAccessor;
-import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
-
-/**
- * Helper class providing a bunch of methods to discover fields of POJO objects and
- * map built-in Java types to the appropriate Cassandra types.
- */
-public class PropertyMappingHelper {
-    /** Bytes array Class type. */
-    private static final Class BYTES_ARRAY_CLASS = (new byte[] {}).getClass();
-
-    /** Mapping from Java to Cassandra types. */
-    private static final Map<Class, DataType.Name> JAVA_TO_CASSANDRA_MAPPING = new HashMap<Class, DataType.Name>();
-
-    static {
-        put(String.class, DataType.Name.TEXT);
-        put(Integer.class, DataType.Name.INT);
-        put(int.class, DataType.Name.INT);
-        put(Short.class, DataType.Name.SMALLINT);
-        put(short.class, DataType.Name.SMALLINT);
-        put(Long.class, DataType.Name.BIGINT);
-        put(long.class, DataType.Name.BIGINT);
-        put(Double.class, DataType.Name.DOUBLE);
-        put(double.class, DataType.Name.DOUBLE);
-        put(Boolean.class, DataType.Name.BOOLEAN);
-        put(boolean.class, DataType.Name.BOOLEAN);
-        put(Float.class, DataType.Name.FLOAT);
-        put(float.class, DataType.Name.FLOAT);
-        put(ByteBuffer.class, DataType.Name.BLOB);
-        put(BYTES_ARRAY_CLASS, DataType.Name.BLOB);
-        put(BigDecimal.class, DataType.Name.DECIMAL);
-        put(InetAddress.class, DataType.Name.INET);
-        put(Date.class, DataType.Name.TIMESTAMP);
-        put(UUID.class, DataType.Name.UUID);
-        put(BigInteger.class, DataType.Name.VARINT);
-    }
-
-    /** */
-    private static void put(Class cls, DataType.Name name) {
-        JAVA_TO_CASSANDRA_MAPPING.put(cls, name);
-    }
-
-    /**
-     * Maps the specified Java class to a Cassandra type.
-     *
-     * @param clazz Java class.
-     *
-     * @return Cassandra type.
-     */
-    public static DataType.Name getCassandraType(Class clazz) {
-        return JAVA_TO_CASSANDRA_MAPPING.get(clazz);
-    }
-
-    /**
-     * Returns property accessor by class property name.
-     *
-     * @param clazz class from which to get property accessor.
-     * @param prop name of the property.
-     *
-     * @return property accessor.
-     */
-    public static PojoFieldAccessor getPojoFieldAccessor(Class clazz, String prop) {
-        PropertyDescriptor[] descriptors = PropertyUtils.getPropertyDescriptors(clazz);
-
-        if (descriptors != null) {
-            for (PropertyDescriptor descriptor : descriptors) {
-                if (descriptor.getName().equals(prop)) {
-                    Field field = null;
-
-                    try {
-                        field = clazz.getDeclaredField(prop);
-                    }
-                    catch (Throwable ignore) {
-                    }
-
-                    return new PojoFieldAccessor(descriptor, field);
-                }
-            }
-        }
-
-        try {
-            return new PojoFieldAccessor(clazz.getDeclaredField(prop));
-        }
-        catch (Throwable e) {
-            throw new IllegalArgumentException("POJO class " + clazz.getName() + " doesn't have '" + prop + "' property");
-        }
-    }
-
-    /**
-     * Returns the value of a specific column in the row returned by a CQL statement.
-     *
-     * @param row Row returned by a CQL statement.
-     * @param col Column name.
-     * @param clazz Java class to which the column value should be cast.
-     * @param serializer Serializer to use if the column stores a BLOB; otherwise may be null.
-     *
-     * @return row column value.
-     */
-    public static Object getCassandraColumnValue(Row row, String col, Class clazz, Serializer serializer) {
-        if (String.class.equals(clazz))
-            return row.getString(col);
-
-        if (Integer.class.equals(clazz))
-            return row.isNull(col) ? null : row.getInt(col);
-
-        if (int.class.equals(clazz)) {
-            if (row.isNull(col)) {
-                throw new IllegalArgumentException("Can't cast null value from Cassandra table column '" + col +
-                        "' to " + "int value used in domain object model");
-            }
-
-            return row.getInt(col);
-        }
-
-        if (Short.class.equals(clazz))
-            return row.isNull(col) ? null : row.getShort(col);
-
-        if (short.class.equals(clazz)) {
-            if (row.isNull(col)) {
-                throw new IllegalArgumentException("Can't cast null value from Cassandra table column '" + col +
-                        "' to " + "short value used in domain object model");
-            }
-
-            return row.getShort(col);
-        }
-
-        if (Long.class.equals(clazz))
-            return row.isNull(col) ? null : row.getLong(col);
-
-        if (long.class.equals(clazz)) {
-            if (row.isNull(col)) {
-                throw new IllegalArgumentException("Can't cast null value from Cassandra table column '" + col +
-                        "' to " + "long value used in domain object model");
-            }
-
-            return row.getLong(col);
-        }
-
-        if (Double.class.equals(clazz))
-            return row.isNull(col) ? null : row.getDouble(col);
-
-        if (double.class.equals(clazz)) {
-            if (row.isNull(col)) {
-                throw new IllegalArgumentException("Can't cast null value from Cassandra table column '" + col +
-                        "' to " + "double value used in domain object model");
-            }
-
-            return row.getDouble(col);
-        }
-
-        if (Boolean.class.equals(clazz))
-            return row.isNull(col) ? null : row.getBool(col);
-
-        if (boolean.class.equals(clazz)) {
-            if (row.isNull(col)) {
-                throw new IllegalArgumentException("Can't cast null value from Cassandra table column '" + col +
-                        "' to " + "boolean value used in domain object model");
-            }
-
-            return row.getBool(col);
-        }
-
-        if (Float.class.equals(clazz))
-            return row.isNull(col) ? null : row.getFloat(col);
-
-        if (float.class.equals(clazz)) {
-            if (row.isNull(col)) {
-                throw new IllegalArgumentException("Can't cast null value from Cassandra table column '" + col +
-                        "' to " + "float value used in domain object model");
-            }
-
-            return row.getFloat(col);
-        }
-
-        if (ByteBuffer.class.equals(clazz))
-            return row.getBytes(col);
-
-        if (PropertyMappingHelper.BYTES_ARRAY_CLASS.equals(clazz)) {
-            ByteBuffer buf = row.getBytes(col);
-            return buf == null ? null : buf.array();
-        }
-
-        if (BigDecimal.class.equals(clazz))
-            return row.getDecimal(col);
-
-        if (InetAddress.class.equals(clazz))
-            return row.getInet(col);
-
-        if (Date.class.equals(clazz))
-            return row.getTimestamp(col);
-
-        if (UUID.class.equals(clazz))
-            return row.getUUID(col);
-
-        if (BigInteger.class.equals(clazz))
-            return row.getVarint(col);
-
-        if (serializer == null) {
-            throw new IllegalStateException("Can't deserialize value from '" + col + "' Cassandra column, " +
-                "cause there is no BLOB serializer specified");
-        }
-
-        ByteBuffer buf = row.getBytes(col);
-
-        return buf == null ? null : serializer.deserialize(buf);
-    }
-}
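
An illustrative sketch (not part of the module) of deriving a CQL column declaration from a POJO
field type via the mapping above, falling back to BLOB for unmapped types that require a Serializer:

    import com.datastax.driver.core.DataType;
    import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;

    public class ColumnTypeSketch {
        /** Builds a "column CQL_TYPE" fragment for the given field type. */
        static String columnDdl(String col, Class<?> fieldType) {
            DataType.Name cassandraType = PropertyMappingHelper.getCassandraType(fieldType);

            // Unmapped Java types are persisted as BLOBs via a configured Serializer.
            return col + " " + (cassandraType != null ? cassandraType.name() : DataType.Name.BLOB.name());
        }

        public static void main(String[] args) {
            System.out.println(columnDdl("id", Long.class));        // prints "id BIGINT"
            System.out.println(columnDdl("payload", Object.class)); // prints "payload BLOB"
        }
    }
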
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
deleted file mode 100644
index dcf5334e1a4de..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/RandomSleeper.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.common;
-
-import java.util.Random;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteLogger;
-
-/**
- * Provides a sleep method with a sleep time randomly selected from the specified range,
- * incrementally shifting the range for each subsequent sleep attempt.
- */
-public class RandomSleeper {
-    /** */
-    private int min;
-
-    /** */
-    private int max;
-
-    /** */
-    private int incr;
-
-    /** */
-    private IgniteLogger log;
-
-    /** */
-    private Random random = new Random(System.currentTimeMillis());
-
-    /** */
-    private int summary;
-
-    /**
-     * Creates sleeper instance.
-     *
-     * @param min minimum sleep time (in milliseconds)
-     * @param max maximum sleep time (in milliseconds)
-     * @param incr time range shift increment (in milliseconds)
-     * @param log Instance of the Ignite logger.
-     */
-    public RandomSleeper(int min, int max, int incr, IgniteLogger log) {
-        if (min <= 0)
-            throw new IllegalArgumentException("Incorrect min time specified: " + min);
-
-        if (max <= min)
-            throw new IllegalArgumentException("Incorrect max time specified: " + max);
-
-        if (incr < 10)
-            throw new IllegalArgumentException("Incorrect increment specified: " + incr);
-
-        this.min = min;
-        this.max = max;
-        this.incr = incr;
-        this.log = log;
-    }
-
-    /**
-     * Sleeps for a randomly selected period of time from the current range.
-     */
-    public void sleep() {
-        try {
-            int timeout = random.nextInt(max - min + 1) + min;
-
-            if (log != null)
-                log.info("Sleeping for " + timeout + "ms");
-
-            Thread.sleep(timeout);
-
-            summary += timeout;
-
-            if (log != null)
-                log.info("Sleep completed");
-        }
-        catch (InterruptedException e) {
-            throw new IgniteException("Random sleep interrupted", e);
-        }
-
-        min += incr;
-        max += incr;
-    }
-
-    /**
-     * Returns the total sleep time accumulated across all sleep attempts.
-     *
-     * @return Total sleep time in milliseconds.
-     */
-    public int getSleepSummary() {
-        return summary;
-    }
-}
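
A hypothetical usage sketch for RandomSleeper: retrying a flaky operation with a randomized,
incrementally growing back-off (the attempt count and time range are illustrative values):

    import java.util.concurrent.Callable;
    import org.apache.ignite.IgniteLogger;
    import org.apache.ignite.cache.store.cassandra.common.RandomSleeper;

    public class RetryWithBackoffSketch {
        static <T> T retry(Callable<T> op, int attempts, IgniteLogger log) throws Exception {
            // Sleep 100..500 ms initially, shifting the range by 100 ms after every attempt.
            RandomSleeper sleeper = new RandomSleeper(100, 500, 100, log);

            Exception last = null;

            for (int i = 0; i < attempts; i++) {
                try {
                    return op.call();
                }
                catch (Exception e) {
                    last = e;
                    sleeper.sleep();
                }
            }

            throw new Exception("Gave up after " + attempts + " attempts, slept " +
                sleeper.getSleepSummary() + " ms in total", last);
        }
    }
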
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java
deleted file mode 100644
index 5d5148831eae1..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/SystemHelper.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.common;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
-/**
- * Helper class providing system information about the host (IP address, hostname, OS, etc.).
- */
-public class SystemHelper {
-    /** System line separator. */
-    public static final String LINE_SEPARATOR = System.getProperty("line.separator");
-
-    /** Host name. */
-    public static final String HOST_NAME;
-
-    /** Host IP address */
-    public static final String HOST_IP;
-
-    static {
-        try {
-            InetAddress addr = InetAddress.getLocalHost();
-            HOST_NAME = addr.getHostName();
-            HOST_IP = addr.getHostAddress();
-        }
-        catch (UnknownHostException e) {
-            throw new IllegalStateException("Failed to get host/ip of current computer", e);
-        }
-    }
-}
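
A trivial usage sketch: SystemHelper resolves the local host once in its static initializer and
exposes the result as constants (an IllegalStateException is thrown if resolution fails):

    import org.apache.ignite.cache.store.cassandra.common.SystemHelper;

    public class HostInfoSketch {
        public static void main(String[] args) {
            System.out.print("Host: " + SystemHelper.HOST_NAME +
                " (" + SystemHelper.HOST_IP + ")" + SystemHelper.LINE_SEPARATOR);
        }
    }
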
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java
deleted file mode 100644
index 2505a24e0c78f..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/common/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains commonly used helper classes
- */
-
-package org.apache.ignite.cache.store.cassandra.common;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
deleted file mode 100644
index a2358a63d136f..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/Credentials.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.datasource;
-
-import java.io.Serializable;
-
-/**
- * Provides credentials for Cassandra (instead of specifying user/password directly in Spring context XML).
- */
-public interface Credentials extends Serializable {
-    /**
-     * Returns the user name.
-     *
-     * @return User name.
-     */
-    public String getUser();
-
-    /**
-     * Returns the password.
-     *
-     * @return Password.
-     */
-    public String getPassword();
-}
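
A hedged sketch of a custom Credentials implementation that resolves the user and password from
environment variables at runtime instead of hard-coding them; the variable names are placeholders:

    import org.apache.ignite.cache.store.cassandra.datasource.Credentials;

    public class EnvCredentials implements Credentials {
        /** */
        private static final long serialVersionUID = 0L;

        /** {@inheritDoc} */
        @Override public String getUser() {
            return System.getenv("CASSANDRA_USER");
        }

        /** {@inheritDoc} */
        @Override public String getPassword() {
            return System.getenv("CASSANDRA_PASSWORD");
        }
    }
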
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
deleted file mode 100644
index 19ebbe311e557..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/DataSource.java
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.datasource;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.io.Serializable;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-import com.datastax.driver.core.AuthProvider;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.NettyOptions;
-import com.datastax.driver.core.PoolingOptions;
-import com.datastax.driver.core.ProtocolOptions;
-import com.datastax.driver.core.ProtocolVersion;
-import com.datastax.driver.core.SSLOptions;
-import com.datastax.driver.core.SocketOptions;
-import com.datastax.driver.core.policies.AddressTranslator;
-import com.datastax.driver.core.policies.LoadBalancingPolicy;
-import com.datastax.driver.core.policies.ReconnectionPolicy;
-import com.datastax.driver.core.policies.RetryPolicy;
-import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.cache.store.cassandra.session.CassandraSession;
-import org.apache.ignite.cache.store.cassandra.session.CassandraSessionImpl;
-import org.apache.ignite.internal.util.tostring.GridToStringExclude;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Data source abstraction to specify configuration of the Cassandra session to be used.
- */
-public class DataSource implements Externalizable {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Null object, used as a replacement for those Cassandra connection options which
-     * don't support serialization (RetryPolicy, LoadBalancingPolicy, etc.).
-     */
-    private static final UUID NULL_OBJECT = UUID.fromString("45ffae47-3193-5910-84a2-048fe65735d9");
-
-    /** Default expiration timeout for Cassandra driver session. */
-    public static final long DFLT_SESSION_EXPIRATION_TIMEOUT = 300000; // 5 minutes.
-
-    /** Number of rows to immediately fetch in CQL statement execution. */
-    private Integer fetchSize;
-
-    /** Consistency level for READ operations. */
-    private ConsistencyLevel readConsistency;
-
-    /** Consistency level for WRITE operations. */
-    private ConsistencyLevel writeConsistency;
-
-    /** Username to use for authentication. */
-    @GridToStringExclude
-    private String user;
-
-    /** Password to use for authentication. */
-    @GridToStringExclude
-    private String pwd;
-
-    /** Port to use for Cassandra connection. */
-    private Integer port;
-
-    /** List of contact points to connect to Cassandra cluster. */
-    private List<InetAddress> contactPoints;
-
-    /** List of contact points with ports to connect to Cassandra cluster. */
-    private List<InetSocketAddress> contactPointsWithPorts;
-
-    /** Maximum time to wait for schema agreement before returning from a DDL query. */
-    private Integer maxSchemaAgreementWaitSeconds;
-
-    /** The native protocol version to use. */
-    private Integer protoVer;
-
-    /** Compression to use for the transport. */
-    private String compression;
-
-    /** Use SSL for communications with Cassandra. */
-    private Boolean useSSL;
-
-    /** Enables metrics collection. */
-    private Boolean collectMetrix;
-
-    /** Enables JMX reporting of the metrics. */
-    private Boolean jmxReporting;
-
-    /** Credentials to use for authentication. */
-    private Credentials creds;
-
-    /** Load balancing policy to use. */
-    private LoadBalancingPolicy loadBalancingPlc;
-
-    /** Reconnection policy to use. */
-    private ReconnectionPolicy reconnectionPlc;
-
-    /** Retry policy to use. */
-    private RetryPolicy retryPlc;
-
-    /** Address translator to use. */
-    private AddressTranslator addrTranslator;
-
-    /** Speculative execution policy to use. */
-    private SpeculativeExecutionPolicy speculativeExecutionPlc;
-
-    /** Authentication provider to use. */
-    private AuthProvider authProvider;
-
-    /** SSL options to use. */
-    private SSLOptions sslOptions;
-
-    /** Connection pooling options to use. */
-    private PoolingOptions poolingOptions;
-
-    /** Socket options to use. */
-    private SocketOptions sockOptions;
-
-    /** Netty options to use for connection. */
-    private NettyOptions nettyOptions;
-
-    /** Expiration timeout for Cassandra driver session. */
-    private long sessionExpirationTimeout = DFLT_SESSION_EXPIRATION_TIMEOUT;
-
-    /** Cassandra session wrapper instance. */
-    private volatile CassandraSession ses;
-
-    /**
-     * Sets user name to use for authentication.
-     *
-     * @param user user name
-     */
-    public void setUser(String user) {
-        this.user = user;
-
-        invalidate();
-    }
-
-    /**
-     * Sets password to use for authentication.
-     *
-     * @param pwd password
-     */
-    public void setPassword(String pwd) {
-        this.pwd = pwd;
-
-        invalidate();
-    }
-
-    /**
-     * Sets port to use for Cassandra connection.
-     *
-     * @param port port
-     */
-    public void setPort(int port) {
-        this.port = port;
-
-        invalidate();
-    }
-
-    /**
-     * Sets list of contact points to connect to Cassandra cluster.
-     *
-     * @param points contact points
-     */
-    public void setContactPoints(String... points) {
-        if (points == null || points.length == 0)
-            return;
-
-        for (String point : points) {
-            if (point.contains(":")) {
-                if (contactPointsWithPorts == null)
-                    contactPointsWithPorts = new LinkedList<>();
-
-                String[] chunks = point.split(":");
-
-                try {
-                    contactPointsWithPorts.add(InetSocketAddress.createUnresolved(chunks[0].trim(), Integer.parseInt(chunks[1].trim())));
-                }
-                catch (Throwable e) {
-                    throw new IllegalArgumentException("Incorrect contact point '" + point + "' specified for Cassandra cache storage", e);
-                }
-            }
-            else {
-                if (contactPoints == null)
-                    contactPoints = new LinkedList<>();
-
-                try {
-                    contactPoints.add(InetAddress.getByName(point));
-                }
-                catch (Throwable e) {
-                    throw new IllegalArgumentException("Incorrect contact point '" + point + "' specified for Cassandra cache storage", e);
-                }
-            }
-        }
-
-        invalidate();
-    }
-
-    /** @param seconds Maximum time to wait for schema agreement before returning from a DDL query. */
-    public void setMaxSchemaAgreementWaitSeconds(int seconds) {
-        maxSchemaAgreementWaitSeconds = seconds;
-
-        invalidate();
-    }
-
-    /**
-     * Sets the native protocol version to use.
-     *
-     * @param ver version number
-     */
-    public void setProtocolVersion(int ver) {
-        protoVer = ver;
-
-        invalidate();
-    }
-
-    /**
-     * Sets compression algorithm to use for the transport.
-     *
-     * @param compression Compression algorithm.
-     */
-    public void setCompression(String compression) {
-        this.compression = compression == null || compression.trim().isEmpty() ? null : compression.trim();
-
-        try {
-            if (this.compression != null)
-                ProtocolOptions.Compression.valueOf(this.compression);
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Incorrect compression '" + compression + "' specified for Cassandra connection", e);
-        }
-
-        invalidate();
-    }
-
-    /**
-     * Enables SSL for communications with Cassandra.
-     *
-     * @param use Flag to enable/disable SSL.
-     */
-    public void setUseSSL(boolean use) {
-        useSSL = use;
-
-        invalidate();
-    }
-
-    /**
-     * Enables metrics collection.
-     *
-     * @param collect Flag to enable/disable metrics collection.
-     */
-    public void setCollectMetrix(boolean collect) {
-        collectMetrix = collect;
-
-        invalidate();
-    }
-
-    /**
-     * Enables JMX reporting of the metrics.
-     *
-     * @param enableReporting Flag to enable/disable JMX reporting.
-     */
-    public void setJmxReporting(boolean enableReporting) {
-        jmxReporting = enableReporting;
-
-        invalidate();
-    }
-
-    /**
-     * Sets number of rows to immediately fetch in CQL statement execution.
-     *
-     * @param size Number of rows to fetch.
-     */
-    public void setFetchSize(int size) {
-        fetchSize = size;
-
-        invalidate();
-    }
-
-    /**
-     * Set consistency level for READ operations.
-     *
-     * @param level Consistency level.
-     */
-    public void setReadConsistency(String level) {
-        readConsistency = parseConsistencyLevel(level);
-
-        invalidate();
-    }
-
-    /**
-     * Set consistency level for WRITE operations.
-     *
-     * @param level Consistency level.
-     */
-    public void setWriteConsistency(String level) {
-        writeConsistency = parseConsistencyLevel(level);
-
-        invalidate();
-    }
-
-    /**
-     * Sets credentials to use for authentication.
-     *
-     * @param creds Credentials.
-     */
-    public void setCredentials(Credentials creds) {
-        this.creds = creds;
-
-        invalidate();
-    }
-
-    /**
-     * Sets load balancing policy.
-     *
-     * @param plc Load balancing policy.
-     */
-    public void setLoadBalancingPolicy(LoadBalancingPolicy plc) {
-        loadBalancingPlc = plc;
-
-        invalidate();
-    }
-
-    /**
-     * Sets reconnection policy.
-     *
-     * @param plc Reconnection policy.
-     */
-    public void setReconnectionPolicy(ReconnectionPolicy plc) {
-        reconnectionPlc = plc;
-
-        invalidate();
-    }
-
-    /**
-     * Sets retry policy.
-     *
-     * @param plc Retry policy.
-     */
-    public void setRetryPolicy(RetryPolicy plc) {
-        retryPlc = plc;
-
-        invalidate();
-    }
-
-    /**
-     * Sets address translator.
-     *
-     * @param translator Address translator.
-     */
-    public void setAddressTranslator(AddressTranslator translator) {
-        addrTranslator = translator;
-
-        invalidate();
-    }
-
-    /**
-     * Sets speculative execution policy.
-     *
-     * @param plc Speculative execution policy.
-     */
-    public void setSpeculativeExecutionPolicy(SpeculativeExecutionPolicy plc) {
-        speculativeExecutionPlc = plc;
-
-        invalidate();
-    }
-
-    /**
-     * Sets authentication provider.
-     *
-     * @param provider Authentication provider.
-     */
-    public void setAuthProvider(AuthProvider provider) {
-        authProvider = provider;
-
-        invalidate();
-    }
-
-    /**
-     * Sets SSL options.
-     *
-     * @param options SSL options.
-     */
-    public void setSslOptions(SSLOptions options) {
-        sslOptions = options;
-
-        invalidate();
-    }
-
-    /**
-     * Sets pooling options.
-     *
-     * @param options pooling options to use.
-     */
-    public void setPoolingOptions(PoolingOptions options) {
-        poolingOptions = options;
-
-        invalidate();
-    }
-
-    /**
-     * Sets socket options to use.
-     *
-     * @param options Socket options.
-     */
-    public void setSocketOptions(SocketOptions options) {
-        sockOptions = options;
-
-        invalidate();
-    }
-
-    /**
-     * Sets netty options to use.
-     *
-     * @param options netty options.
-     */
-    public void setNettyOptions(NettyOptions options) {
-        nettyOptions = options;
-
-        invalidate();
-    }
-
-    /**
-     * Sets expiration timeout for Cassandra driver session. Idle sessions that are not
-     * used during this timeout value will be automatically closed and recreated later
-     * on demand.
-     * <p>
-     * If set to {@code 0}, timeout is disabled.
-     * <p>
-     * Default value is {@link #DFLT_SESSION_EXPIRATION_TIMEOUT}.
-     *
-     * @param sessionExpirationTimeout Expiration timeout for Cassandra driver session.
-     */
-    public void setSessionExpirationTimeout(long sessionExpirationTimeout) {
-        this.sessionExpirationTimeout = sessionExpirationTimeout;
-
-        invalidate();
-    }
-
-    /**
-     * Creates a Cassandra session wrapper if it wasn't created yet and returns it.
-     *
-     * @param log Logger.
-     * @return Cassandra session wrapper.
-     */
-    public synchronized CassandraSession session(IgniteLogger log) {
-        if (ses != null)
-            return ses;
-
-        Cluster.Builder builder = Cluster.builder();
-
-        if (user != null)
-            builder = builder.withCredentials(user, pwd);
-
-        if (port != null)
-            builder = builder.withPort(port);
-
-        if (contactPoints != null)
-            builder = builder.addContactPoints(contactPoints);
-
-        if (contactPointsWithPorts != null)
-            builder = builder.addContactPointsWithPorts(contactPointsWithPorts);
-
-        if (maxSchemaAgreementWaitSeconds != null)
-            builder = builder.withMaxSchemaAgreementWaitSeconds(maxSchemaAgreementWaitSeconds);
-
-        if (protoVer != null)
-            builder = builder.withProtocolVersion(ProtocolVersion.fromInt(protoVer));
-
-        if (compression != null) {
-            try {
-                builder = builder.withCompression(ProtocolOptions.Compression.valueOf(compression.trim().toLowerCase()));
-            }
-            catch (IllegalArgumentException e) {
-                throw new IgniteException("Incorrect compression option '" + compression + "' specified for Cassandra connection", e);
-            }
-        }
-
-        if (useSSL != null && useSSL)
-            builder = builder.withSSL();
-
-        if (sslOptions != null)
-            builder = builder.withSSL(sslOptions);
-
-        if (collectMetrix != null && !collectMetrix)
-            builder = builder.withoutMetrics();
-
-        if (jmxReporting != null && !jmxReporting)
-            builder = builder.withoutJMXReporting();
-
-        if (creds != null)
-            builder = builder.withCredentials(creds.getUser(), creds.getPassword());
-
-        if (loadBalancingPlc != null)
-            builder = builder.withLoadBalancingPolicy(loadBalancingPlc);
-
-        if (reconnectionPlc != null)
-            builder = builder.withReconnectionPolicy(reconnectionPlc);
-
-        if (retryPlc != null)
-            builder = builder.withRetryPolicy(retryPlc);
-
-        if (addrTranslator != null)
-            builder = builder.withAddressTranslator(addrTranslator);
-
-        if (speculativeExecutionPlc != null)
-            builder = builder.withSpeculativeExecutionPolicy(speculativeExecutionPlc);
-
-        if (authProvider != null)
-            builder = builder.withAuthProvider(authProvider);
-
-        if (poolingOptions != null)
-            builder = builder.withPoolingOptions(poolingOptions);
-
-        if (sockOptions != null)
-            builder = builder.withSocketOptions(sockOptions);
-
-        if (nettyOptions != null)
-            builder = builder.withNettyOptions(nettyOptions);
-
-        return ses = new CassandraSessionImpl(
-            builder, fetchSize, readConsistency, writeConsistency, sessionExpirationTimeout, log);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeObject(fetchSize);
-        out.writeObject(readConsistency);
-        out.writeObject(writeConsistency);
-        U.writeString(out, user);
-        U.writeString(out, pwd);
-        out.writeObject(port);
-        out.writeObject(contactPoints);
-        out.writeObject(contactPointsWithPorts);
-        out.writeObject(maxSchemaAgreementWaitSeconds);
-        out.writeObject(protoVer);
-        U.writeString(out, compression);
-        out.writeObject(useSSL);
-        out.writeObject(collectMetrix);
-        out.writeObject(jmxReporting);
-        out.writeObject(creds);
-        writeObject(out, loadBalancingPlc);
-        writeObject(out, reconnectionPlc);
-        writeObject(out, addrTranslator);
-        writeObject(out, speculativeExecutionPlc);
-        writeObject(out, authProvider);
-        writeObject(out, sslOptions);
-        writeObject(out, poolingOptions);
-        writeObject(out, sockOptions);
-        writeObject(out, nettyOptions);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        fetchSize = (Integer)in.readObject();
-        readConsistency = (ConsistencyLevel)in.readObject();
-        writeConsistency = (ConsistencyLevel)in.readObject();
-        user = U.readString(in);
-        pwd = U.readString(in);
-        port = (Integer)in.readObject();
-        contactPoints = (List<InetAddress>)in.readObject();
-        contactPointsWithPorts = (List<InetSocketAddress>)in.readObject();
-        maxSchemaAgreementWaitSeconds = (Integer)in.readObject();
-        protoVer = (Integer)in.readObject();
-        compression = U.readString(in);
-        useSSL = (Boolean)in.readObject();
-        collectMetrix = (Boolean)in.readObject();
-        jmxReporting = (Boolean)in.readObject();
-        creds = (Credentials)in.readObject();
-        loadBalancingPlc = (LoadBalancingPolicy)readObject(in);
-        reconnectionPlc = (ReconnectionPolicy)readObject(in);
-        addrTranslator = (AddressTranslator)readObject(in);
-        speculativeExecutionPlc = (SpeculativeExecutionPolicy)readObject(in);
-        authProvider = (AuthProvider)readObject(in);
-        sslOptions = (SSLOptions)readObject(in);
-        poolingOptions = (PoolingOptions)readObject(in);
-        sockOptions = (SocketOptions)readObject(in);
-        nettyOptions = (NettyOptions)readObject(in);
-    }
-
-    /**
-     * Helper method used to serialize class members
-     * @param out the stream to write the object to
-     * @param obj the object to be written
-     * @throws IOException Includes any I/O exceptions that may occur
-     */
-    private void writeObject(ObjectOutput out, Object obj) throws IOException {
-        out.writeObject(obj == null || !(obj instanceof Serializable) ? NULL_OBJECT : obj);
-    }
-
-    /**
-     * Helper method used to deserialize class members
-     * @param in the stream to read data from in order to restore the object
-     * @throws IOException Includes any I/O exceptions that may occur
-     * @throws ClassNotFoundException If the class for an object being restored cannot be found
-     * @return deserialized object
-     */
-    private Object readObject(ObjectInput in) throws IOException, ClassNotFoundException {
-        Object obj = in.readObject();
-        return NULL_OBJECT.equals(obj) ? null : obj;
-    }
-
-    /**
-     * Parses consistency level provided as string.
-     *
-     * @param level consistency level string.
-     *
-     * @return consistency level.
-     */
-    private ConsistencyLevel parseConsistencyLevel(String level) {
-        if (level == null)
-            return null;
-
-        try {
-            return ConsistencyLevel.valueOf(level.trim().toUpperCase());
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Incorrect consistency level '" + level + "' specified for Cassandra connection", e);
-        }
-    }
-
-    /**
-     * Invalidates session.
-     */
-    private synchronized void invalidate() {
-        ses = null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return S.toString(DataSource.class, this);
-    }
-}
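
An illustrative configuration sketch for DataSource; addresses, consistency levels and credentials
below are placeholders. Every setter invalidates the cached session wrapper, so the driver Cluster
is only built on the first session(log) call:

    import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
    import org.apache.ignite.cache.store.cassandra.datasource.PlainCredentials;

    public class DataSourceConfigSketch {
        static DataSource cassandraDataSource() {
            DataSource dataSrc = new DataSource();

            dataSrc.setContactPoints("127.0.0.1", "127.0.0.1:9042"); // plain host or host:port
            dataSrc.setReadConsistency("ONE");
            dataSrc.setWriteConsistency("QUORUM");
            dataSrc.setCredentials(new PlainCredentials("ignite", "secret"));
            dataSrc.setSessionExpirationTimeout(DataSource.DFLT_SESSION_EXPIRATION_TIMEOUT);

            return dataSrc;
        }
    }
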
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
deleted file mode 100644
index 46ebdc543b7f8..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/PlainCredentials.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.datasource;
-
-/**
- * Simple implementation of {@link Credentials} which just uses its constructor to hold user/password values.
- */
-public class PlainCredentials implements Credentials {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** User name. */
-    private String user;
-
-    /** User password. */
-    private String pwd;
-
-    /**
-     * Creates credentials object.
-     *
-     * @param user User name.
-     * @param pwd User password.
-     */
-    public PlainCredentials(String user, String pwd) {
-        this.user = user;
-        this.pwd = pwd;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getUser() {
-        return user;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getPassword() {
-        return pwd;
-    }
-}
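
For reference after the move to ignite-extensions, a minimal usage sketch of the credentials API removed here; only the PlainCredentials constructor and the Credentials getters come from this patch, while the way the object is handed to the data source is an assumption and left as a comment.

import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
import org.apache.ignite.cache.store.cassandra.datasource.PlainCredentials;

public class CredentialsExample {
    public static void main(String[] args) {
        // Plain user/password holder; the values here are placeholders.
        Credentials creds = new PlainCredentials("cassandra", "cassandra");

        System.out.println("Connecting as: " + creds.getUser());
        // The instance would normally be injected into the Cassandra DataSource bean
        // of the cache store configuration (setter name not shown in this patch).
    }
}
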
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java
deleted file mode 100644
index 6e1d22aed9aa2..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/datasource/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains data source implementation
- */
-
-package org.apache.ignite.cache.store.cassandra.datasource;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java
deleted file mode 100644
index 00aee904054d0..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains {@link org.apache.ignite.cache.store.CacheStore} implementation backed by Cassandra database
- */
-
-package org.apache.ignite.cache.store.cassandra;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
deleted file mode 100644
index dd4505811d1cb..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyPersistenceSettings.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.ignite.cache.affinity.AffinityKeyMapped;
-import org.w3c.dom.Element;
-import org.w3c.dom.NodeList;
-
-/**
- * Stores persistence settings for Ignite cache key
- */
-public class KeyPersistenceSettings extends PersistenceSettings<PojoKeyField> {
-    /** Partition key XML tag. */
-    private static final String PARTITION_KEY_ELEMENT = "partitionKey";
-
-    /** Cluster key XML tag. */
-    private static final String CLUSTER_KEY_ELEMENT = "clusterKey";
-
-    /** POJO field XML tag. */
-    private static final String FIELD_ELEMENT = "field";
-
-    /** POJO fields. */
-    private List<PojoKeyField> fields = new LinkedList<>();
-
-    /** Partition key fields. */
-    private List<PojoKeyField> partKeyFields = new LinkedList<>();
-
-    /** Cluster key fields. */
-    private List<PojoKeyField> clusterKeyFields = new LinkedList<>();
-
-    /**
-     * Creates key persistence settings object based on its XML configuration.
-     *
-     * @param el XML element storing key persistence settings
-     */
-    public KeyPersistenceSettings(Element el) {
-        super(el);
-
-        if (PersistenceStrategy.POJO != getStrategy()) {
-            init();
-
-            return;
-        }
-
-        Element node = el.getElementsByTagName(PARTITION_KEY_ELEMENT) != null ?
-                (Element)el.getElementsByTagName(PARTITION_KEY_ELEMENT).item(0) : null;
-
-        NodeList partKeysNodes = node == null ? null : node.getElementsByTagName(FIELD_ELEMENT);
-
-        node = el.getElementsByTagName(CLUSTER_KEY_ELEMENT) != null ?
-                (Element)el.getElementsByTagName(CLUSTER_KEY_ELEMENT).item(0) : null;
-
-        NodeList clusterKeysNodes = node == null ? null : node.getElementsByTagName(FIELD_ELEMENT);
-
-        if ((partKeysNodes == null || partKeysNodes.getLength() == 0) &&
-                clusterKeysNodes != null && clusterKeysNodes.getLength() > 0) {
-            throw new IllegalArgumentException("It's not allowed to specify cluster key fields mapping, but " +
-                "doesn't specify partition key mappings");
-        }
-
-        // Detecting partition key fields
-        partKeyFields = detectPojoFields(partKeysNodes);
-
-        if (partKeyFields == null || partKeyFields.isEmpty()) {
-            throw new IllegalStateException("Failed to initialize partition key fields for class '" +
-                    getJavaClass().getName() + "'");
-        }
-
-        List<PojoKeyField> filteredFields = new LinkedList<>();
-
-        // Find all fields annotated by @AffinityKeyMapped
-        for (PojoKeyField field : partKeyFields) {
-            if (field.getAnnotation(AffinityKeyMapped.class) != null)
-                filteredFields.add(field);
-        }
-
-        // If there are any fields annotated by @AffinityKeyMapped then all other fields are part of cluster key
-        partKeyFields = !filteredFields.isEmpty() ? filteredFields : partKeyFields;
-
-        // Detecting cluster key fields
-        clusterKeyFields = detectPojoFields(clusterKeysNodes);
-
-        filteredFields = new LinkedList<>();
-
-        // Filtering out fields which are already in the partition key fields list
-        for (PojoKeyField field : clusterKeyFields) {
-            if (!PojoField.containsField(partKeyFields, field.getName()))
-                filteredFields.add(field);
-        }
-
-        clusterKeyFields = filteredFields;
-
-        fields = new LinkedList<>();
-        fields.addAll(partKeyFields);
-        fields.addAll(clusterKeyFields);
-
-        checkDuplicates(fields);
-
-        init();
-    }
-
-    /** {@inheritDoc} */
-    @Override public List<PojoKeyField> getFields() {
-        return fields;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected PojoKeyField createPojoField(Element el, Class clazz) {
-        return new PojoKeyField(el, clazz);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected PojoKeyField createPojoField(PojoFieldAccessor accessor) {
-        return new PojoKeyField(accessor);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected PojoKeyField createPojoField(PojoKeyField field, Class clazz) {
-        return new PojoKeyField(field, clazz);
-    }
-
-    /**
-     * Returns Cassandra DDL for primary key.
-     *
-     * @return DDL statement.
-     */
-    public String getPrimaryKeyDDL() {
-        StringBuilder partKey = new StringBuilder();
-
-        List<String> cols = getPartitionKeyColumns();
-        for (String column : cols) {
-            if (partKey.length() != 0)
-                partKey.append(", ");
-
-            partKey.append("\"").append(column).append("\"");
-        }
-
-        StringBuilder clusterKey = new StringBuilder();
-
-        cols = getClusterKeyColumns();
-        if (cols != null) {
-            for (String column : cols) {
-                if (clusterKey.length() != 0)
-                    clusterKey.append(", ");
-
-                clusterKey.append("\"").append(column).append("\"");
-            }
-        }
-
-        return clusterKey.length() == 0 ?
-            "  primary key ((" + partKey + "))" :
-            "  primary key ((" + partKey + "), " + clusterKey + ")";
-    }
-
-    /**
-     * Returns Cassandra DDL for cluster key.
-     *
-     * @return Cluster key DDL.
-     */
-    public String getClusteringDDL() {
-        StringBuilder builder = new StringBuilder();
-
-        for (PojoField field : clusterKeyFields) {
-            PojoKeyField.SortOrder sortOrder = ((PojoKeyField)field).getSortOrder();
-
-            if (sortOrder == null)
-                continue;
-
-            if (builder.length() != 0)
-                builder.append(", ");
-
-            boolean asc = PojoKeyField.SortOrder.ASC == sortOrder;
-
-            builder.append("\"").append(field.getColumn()).append("\" ").append(asc ? "asc" : "desc");
-        }
-
-        return builder.length() == 0 ? null : "clustering order by (" + builder + ")";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String defaultColumnName() {
-        return "key";
-    }
-
-    /**
-     * Returns partition key columns of Cassandra table.
-     *
-     * @return List of column names.
-     */
-    private List<String> getPartitionKeyColumns() {
-        List<String> cols = new LinkedList<>();
-
-        if (PersistenceStrategy.BLOB == getStrategy() || PersistenceStrategy.PRIMITIVE == getStrategy()) {
-            cols.add(getColumn());
-            return cols;
-        }
-
-        if (partKeyFields != null) {
-            for (PojoField field : partKeyFields)
-                cols.add(field.getColumn());
-        }
-
-        return cols;
-    }
-
-    /**
-     * Returns cluster key columns of Cassandra table.
-     *
-     * @return List of column names.
-     */
-    private List<String> getClusterKeyColumns() {
-        List<String> cols = new LinkedList<>();
-
-        if (clusterKeyFields != null) {
-            for (PojoField field : clusterKeyFields)
-                cols.add(field.getColumn());
-        }
-
-        return cols;
-    }
-
-    /**
-     * @see java.io.Serializable
-     */
-    private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
-        in.defaultReadObject();
-
-        fields = enrichFields(fields);
-    }
-}
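
Per the constructor comments above, when any of the key fields is annotated with @AffinityKeyMapped, only the annotated fields form the Cassandra partition key and the remaining key fields are treated as cluster key fields. Below is a hedged sketch of a composite key relying on that behavior; the class and its fields are hypothetical, only the annotation and the no-arg constructor requirement come from the removed sources.

import org.apache.ignite.cache.affinity.AffinityKeyMapped;

/** Hypothetical composite cache key (the POJO strategy requires a no-arg constructor). */
public class OrderKey {
    /** Annotated field: kept in the Cassandra partition key. */
    @AffinityKeyMapped
    private long customerId;

    /** Unannotated field: per the removed constructor logic, not part of the partition key. */
    private long orderId;

    /** Required by the POJO persistence strategy. */
    public OrderKey() {
    }

    public OrderKey(long customerId, long orderId) {
        this.customerId = customerId;
        this.orderId = orderId;
    }
}
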
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
deleted file mode 100644
index f865674e76442..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/KeyValuePersistenceSettings.java
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Serializable;
-import java.io.StringReader;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;
-import org.apache.ignite.cache.store.cassandra.common.SystemHelper;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.springframework.core.io.Resource;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.InputSource;
-
-/**
- * Stores persistence settings for Ignite cache key and value
- */
-public class KeyValuePersistenceSettings implements Serializable {
-    /**
-     * Default Cassandra keyspace options which should be used to create new keyspace.
-     * <ul>
-     * <li> <b>SimpleStrategy</b> for replication works well for a single data center Cassandra cluster.<br/>
-     *      If your Cassandra cluster is deployed across multiple data centers it's better to use <b>NetworkTopologyStrategy</b>.
-     * </li>
-     * <li> Three replicas will be created for each data block. </li>
-     * <li> Setting DURABLE_WRITES to true specifies that all data should be written to the commit log. </li>
-     * </ul>
-     */
-    private static final String DFLT_KEYSPACE_OPTIONS = "replication = {'class' : 'SimpleStrategy', " +
-            "'replication_factor' : 3} and durable_writes = true";
-
-    /** Xml attribute specifying Cassandra keyspace to use. */
-    private static final String KEYSPACE_ATTR = "keyspace";
-
-    /** Xml attribute specifying Cassandra table to use. */
-    private static final String TABLE_ATTR = "table";
-
-    /** Xml attribute specifying ttl (time to live) for rows inserted in Cassandra. */
-    private static final String TTL_ATTR = "ttl";
-
-    /** Root xml element containing persistence settings specification. */
-    private static final String PERSISTENCE_NODE = "persistence";
-
-    /** Xml element specifying Cassandra keyspace options. */
-    private static final String KEYSPACE_OPTIONS_NODE = "keyspaceOptions";
-
-    /** Xml element specifying Cassandra table options. */
-    private static final String TABLE_OPTIONS_NODE = "tableOptions";
-
-    /** Xml element specifying Ignite cache key persistence settings. */
-    private static final String KEY_PERSISTENCE_NODE = "keyPersistence";
-
-    /** Xml element specifying Ignite cache value persistence settings. */
-    private static final String VALUE_PERSISTENCE_NODE = "valuePersistence";
-
-    /**
-     * TTL (time to live) for rows inserted into Cassandra table
-     * {@link <a href="https://docs.datastax.com/en/cql/3.1/cql/cql_using/use_expire_c.html">Expiring data</a>}.
-     */
-    private Integer ttl;
-
-    /** Cassandra keyspace (analog of tablespace in relational databases). */
-    private String keyspace;
-
-    /** Cassandra table. */
-    private String tbl;
-
-    /**
-     * Cassandra table creation options
-     * {@link <a href="https://docs.datastax.com/en/cql/3.0/cql/cql_reference/create_table_r.html">CREATE TABLE</a>}.
-     */
-    private String tblOptions;
-
-    /**
-     * Cassandra keyspace creation options
-     * {@link <a href="https://docs.datastax.com/en/cql/3.0/cql/cql_reference/create_keyspace_r.html">CREATE KEYSPACE</a>}.
-     */
-    private String keyspaceOptions = DFLT_KEYSPACE_OPTIONS;
-
-    /** Persistence settings for Ignite cache keys. */
-    private KeyPersistenceSettings keyPersistenceSettings;
-
-    /** Persistence settings for Ignite cache values. */
-    private ValuePersistenceSettings valPersistenceSettings;
-
-    /** List of Cassandra table columns */
-    private List<String> tableColumns;
-
-    /**
-     * Constructs Ignite cache key/value persistence settings.
-     *
-     * @param settings string containing xml with persistence settings for Ignite cache key/value
-     */
-    public KeyValuePersistenceSettings(String settings) {
-        init(settings);
-    }
-
-    /**
-     * Constructs Ignite cache key/value persistence settings.
-     *
-     * @param settingsFile xml file with persistence settings for Ignite cache key/value
-     */
-    public KeyValuePersistenceSettings(File settingsFile) {
-        InputStream in;
-
-        try {
-            in = new FileInputStream(settingsFile);
-        }
-        catch (IOException e) {
-            throw new IgniteException("Failed to get input stream for Cassandra persistence settings file: " +
-                    settingsFile.getAbsolutePath(), e);
-        }
-
-        init(loadSettings(in));
-    }
-
-    /**
-     * Constructs Ignite cache key/value persistence settings.
-     *
-     * @param settingsRsrc resource containing xml with persistence settings for Ignite cache key/value
-     */
-    public KeyValuePersistenceSettings(Resource settingsRsrc) {
-        InputStream in;
-
-        try {
-            in = settingsRsrc.getInputStream();
-        }
-        catch (IOException e) {
-            throw new IgniteException("Failed to get input stream for Cassandra persistence settings resource: " + settingsRsrc, e);
-        }
-
-        init(loadSettings(in));
-    }
-
-    /**
-     * Returns ttl to use while inserting new rows into Cassandra table.
-     *
-     * @return ttl
-     */
-    public Integer getTTL() {
-        return ttl;
-    }
-
-    /**
-     * Returns Cassandra keyspace to use.
-     *
-     * @return keyspace.
-     */
-    public String getKeyspace() {
-        return keyspace;
-    }
-
-    /**
-     * Returns Cassandra table to use.
-     *
-     * @return table.
-     */
-    public String getTable() {
-        return tbl;
-    }
-
-    /**
-     * Returns persistence settings for Ignite cache keys.
-     *
-     * @return keys persistence settings.
-     */
-    public KeyPersistenceSettings getKeyPersistenceSettings() {
-        return keyPersistenceSettings;
-    }
-
-    /**
-     * Returns persistence settings for Ignite cache values.
-     *
-     * @return values persistence settings.
-     */
-    public ValuePersistenceSettings getValuePersistenceSettings() {
-        return valPersistenceSettings;
-    }
-
-    /**
-     * Returns list of POJO fields to be mapped to Cassandra table columns.
-     *
-     * @return POJO fields list.
-     */
-    public List<PojoField> getFields() {
-        List<PojoField> fields = new LinkedList<>();
-
-        for (PojoField field : keyPersistenceSettings.getFields())
-            fields.add(field);
-
-        for (PojoField field : valPersistenceSettings.getFields())
-            fields.add(field);
-
-        return fields;
-    }
-
-    /**
-     * Returns list of Ignite cache key POJO fields to be mapped to Cassandra table columns.
-     *
-     * @return POJO fields list.
-     */
-    public List<PojoKeyField> getKeyFields() {
-        return keyPersistenceSettings.getFields();
-    }
-
-    /**
-     * Returns list of Ignite cache value POJO fields to be mapped to Cassandra table columns.
-     *
-     * @return POJO fields list.
-     */
-    public List<PojoValueField> getValueFields() {
-        return valPersistenceSettings.getFields();
-    }
-
-    /**
-     * Returns DDL statement to create Cassandra keyspace.
-     *
-     * @return Keyspace DDL statement.
-     */
-    public String getKeyspaceDDLStatement() {
-        StringBuilder builder = new StringBuilder();
-        builder.append("create keyspace if not exists \"").append(keyspace).append("\"");
-
-        if (keyspaceOptions != null) {
-            if (!keyspaceOptions.trim().toLowerCase().startsWith("with"))
-                builder.append("\nwith");
-
-            builder.append(" ").append(keyspaceOptions);
-        }
-
-        String statement = builder.toString().trim().replaceAll(" +", " ");
-
-        return statement.endsWith(";") ? statement : statement + ";";
-    }
-
-    /**
-     * Returns column names for Cassandra table.
-     *
-     * @return Column names.
-     */
-    public List<String> getTableColumns() {
-        return tableColumns;
-    }
-
-    /**
-     * Returns DDL statement to create Cassandra table.
-     *
-     * @param table Table name.
-     * @return Table DDL statement.
-     */
-    public String getTableDDLStatement(String table) {
-        if (table == null || table.trim().isEmpty())
-            throw new IllegalArgumentException("Table name should be specified");
-
-        String keyColumnsDDL = keyPersistenceSettings.getTableColumnsDDL();
-        String valColumnsDDL = valPersistenceSettings.getTableColumnsDDL(new HashSet<>(keyPersistenceSettings.getTableColumns()));
-
-        String colsDDL = keyColumnsDDL;
-
-        if (valColumnsDDL != null && !valColumnsDDL.trim().isEmpty())
-            colsDDL += ",\n" + valColumnsDDL;
-
-        String primaryKeyDDL = keyPersistenceSettings.getPrimaryKeyDDL();
-
-        String clusteringDDL = keyPersistenceSettings.getClusteringDDL();
-
-        String optionsDDL = tblOptions != null && !tblOptions.trim().isEmpty() ? tblOptions.trim() : "";
-
-        if (clusteringDDL != null && !clusteringDDL.isEmpty())
-            optionsDDL = optionsDDL.isEmpty() ? clusteringDDL : optionsDDL + " and " + clusteringDDL;
-
-        if (!optionsDDL.trim().isEmpty())
-            optionsDDL = optionsDDL.trim().toLowerCase().startsWith("with") ? optionsDDL.trim() : "with " + optionsDDL.trim();
-
-        StringBuilder builder = new StringBuilder();
-
-        builder.append("create table if not exists \"").append(keyspace).append("\".\"").append(table).append("\"");
-        builder.append("\n(\n").append(colsDDL).append(",\n").append(primaryKeyDDL).append("\n)");
-
-        if (!optionsDDL.isEmpty())
-            builder.append(" \n").append(optionsDDL);
-
-        String tblDDL = builder.toString().trim().replaceAll(" +", " ");
-
-        return tblDDL.endsWith(";") ? tblDDL : tblDDL + ";";
-    }
-
-    /**
-     * Returns DDL statements to create Cassandra table secondary indexes.
-     *
-     * @param table Table name.
-     * @return DDL statements to create secondary indexes.
-     */
-    public List<String> getIndexDDLStatements(String table) {
-        List<String> idxDDLs = new LinkedList<>();
-
-        Set<String> keyCols = new HashSet<>(keyPersistenceSettings.getTableColumns());
-
-        List<PojoValueField> fields = valPersistenceSettings.getFields();
-
-        for (PojoField field : fields) {
-            if (!keyCols.contains(field.getColumn()) && ((PojoValueField)field).isIndexed())
-                idxDDLs.add(((PojoValueField)field).getIndexDDL(keyspace, table));
-        }
-
-        return idxDDLs;
-    }
-
-    /**
-     * Loads Ignite cache persistence settings from resource.
-     *
-     * @param in Input stream.
-     * @return String containing xml with Ignite cache persistence settings.
-     */
-    private String loadSettings(InputStream in) {
-        StringBuilder settings = new StringBuilder();
-        BufferedReader reader = null;
-
-        try {
-            reader = new BufferedReader(new InputStreamReader(in));
-
-            String line = reader.readLine();
-
-            while (line != null) {
-                if (settings.length() != 0)
-                    settings.append(SystemHelper.LINE_SEPARATOR);
-
-                settings.append(line);
-
-                line = reader.readLine();
-            }
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to read input stream for Cassandra persistence settings", e);
-        }
-        finally {
-            U.closeQuiet(reader);
-            U.closeQuiet(in);
-        }
-
-        return settings.toString();
-    }
-
-    /**
-     * @param elem Element with data.
-     * @param attr Attribute name.
-     * @return Numeric value for specified attribute.
-     */
-    private int extractIntAttribute(Element elem, String attr) {
-        String val = elem.getAttribute(attr).trim();
-
-        try {
-            return Integer.parseInt(val);
-        }
-        catch (NumberFormatException ignored) {
-            throw new IllegalArgumentException("Incorrect value '" + val + "' specified for '" + attr + "' attribute");
-        }
-    }
-
-    /**
-     * Initializes persistence settings from XML string.
-     *
-     * @param settings XML string containing Ignite cache persistence settings configuration.
-     */
-    @SuppressWarnings("IfCanBeSwitch")
-    private void init(String settings) {
-        Document doc;
-
-        try {
-            DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
-            DocumentBuilder builder = factory.newDocumentBuilder();
-            doc = builder.parse(new InputSource(new StringReader(settings)));
-        }
-        catch (Throwable e) {
-            throw new IllegalArgumentException("Failed to parse persistence settings:" +
-                SystemHelper.LINE_SEPARATOR + settings, e);
-        }
-
-        Element root = doc.getDocumentElement();
-
-        if (!PERSISTENCE_NODE.equals(root.getNodeName())) {
-            throw new IllegalArgumentException("Incorrect persistence settings specified. " +
-                "Root XML element should be 'persistence'");
-        }
-
-        if (!root.hasAttribute(KEYSPACE_ATTR)) {
-            throw new IllegalArgumentException("Incorrect persistence settings '" + KEYSPACE_ATTR +
-                "' attribute should be specified");
-        }
-
-        keyspace = root.getAttribute(KEYSPACE_ATTR).trim();
-        tbl = root.hasAttribute(TABLE_ATTR) ? root.getAttribute(TABLE_ATTR).trim() : null;
-
-        if (root.hasAttribute(TTL_ATTR))
-            ttl = extractIntAttribute(root, TTL_ATTR);
-
-        if (!root.hasChildNodes()) {
-            throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
-                "there are no key and value persistence settings specified");
-        }
-
-        NodeList children = root.getChildNodes();
-        int cnt = children.getLength();
-
-        for (int i = 0; i < cnt; i++) {
-            Node node = children.item(i);
-
-            if (node.getNodeType() != Node.ELEMENT_NODE)
-                continue;
-
-            Element el = (Element)node;
-            String nodeName = el.getNodeName();
-
-            if (nodeName.equals(TABLE_OPTIONS_NODE)) {
-                tblOptions = el.getTextContent();
-                tblOptions = tblOptions.replace("\n", " ").replace("\r", "").replace("\t", " ");
-            }
-            else if (nodeName.equals(KEYSPACE_OPTIONS_NODE)) {
-                keyspaceOptions = el.getTextContent();
-                keyspaceOptions = keyspaceOptions.replace("\n", " ").replace("\r", "").replace("\t", " ");
-            }
-            else if (nodeName.equals(KEY_PERSISTENCE_NODE))
-                keyPersistenceSettings = new KeyPersistenceSettings(el);
-            else if (nodeName.equals(VALUE_PERSISTENCE_NODE))
-                valPersistenceSettings = new ValuePersistenceSettings(el);
-        }
-
-        if (keyPersistenceSettings == null) {
-            throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
-                "there are no key persistence settings specified");
-        }
-
-        if (valPersistenceSettings == null) {
-            throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
-                "there are no value persistence settings specified");
-        }
-
-        List<PojoKeyField> keyFields = keyPersistenceSettings.getFields();
-        List<PojoValueField> valFields = valPersistenceSettings.getFields();
-
-        if (PersistenceStrategy.POJO == keyPersistenceSettings.getStrategy() &&
-            (keyFields == null || keyFields.isEmpty())) {
-            throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
-                "there are no key fields found");
-        }
-
-        if (PersistenceStrategy.POJO == valPersistenceSettings.getStrategy() &&
-            (valFields == null || valFields.isEmpty())) {
-            throw new IllegalArgumentException("Incorrect Cassandra persistence settings specification, " +
-                "there are no value fields found");
-        }
-
-        // Validating aliases compatibility - fields having different names, but mapped to the same Cassandra table column.
-        if (valFields != null && !valFields.isEmpty()) {
-            String keyColumn = keyPersistenceSettings.getColumn();
-            Class keyClass = keyPersistenceSettings.getJavaClass();
-
-            if (keyColumn != null && !keyColumn.isEmpty()) {
-                for (PojoField valField : valFields) {
-                    if (keyColumn.equals(valField.getColumn()) &&
-                            !CassandraHelper.isCassandraCompatibleTypes(keyClass, valField.getJavaClass())) {
-                        throw new IllegalArgumentException("Value field '" + valField.getName() + "' shares the same " +
-                                "Cassandra table column '" + keyColumn + "' with key, but their Java classes are " +
-                                "different. Fields sharing the same column should have the same Java class as their " +
-                                "type or should be mapped to the same Cassandra primitive type.");
-                    }
-                }
-            }
-
-            if (keyFields != null && !keyFields.isEmpty()) {
-                for (PojoField keyField : keyFields) {
-                    for (PojoField valField : valFields) {
-                        if (keyField.getColumn().equals(valField.getColumn()) &&
-                                !CassandraHelper.isCassandraCompatibleTypes(keyField.getJavaClass(), valField.getJavaClass())) {
-                            throw new IllegalArgumentException("Value field '" + valField.getName() + "' shares the same " +
-                                    "Cassandra table column '" + keyColumn + "' with key field '" + keyField.getName() + "', " +
-                                    "but their Java classes are different. Fields sharing the same column should have " +
-                                    "the same Java class as their type or should be mapped to the same Cassandra " +
-                                    "primitive type.");
-                        }
-                    }
-                }
-            }
-        }
-
-        tableColumns = new LinkedList<>();
-
-        for (String column : keyPersistenceSettings.getTableColumns()) {
-            if (!tableColumns.contains(column))
-                tableColumns.add(column);
-        }
-
-        for (String column : valPersistenceSettings.getTableColumns()) {
-            if (!tableColumns.contains(column))
-                tableColumns.add(column);
-        }
-
-        tableColumns = Collections.unmodifiableList(tableColumns);
-    }
-}
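
For migration reference, here is a sketch of building the settings from an inline XML descriptor. The element and attribute names (persistence, keyspace, table, ttl, keyPersistence, valuePersistence, strategy, class, column) are the ones declared above; the concrete keyspace, table and Java classes are invented for the example.

import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;

public class PersistenceSettingsExample {
    public static void main(String[] args) {
        // Hypothetical descriptor mirroring the constants of the removed class.
        String xml =
            "<persistence keyspace=\"test_ks\" table=\"test_tbl\" ttl=\"86400\">\n" +
            "    <keyPersistence class=\"java.lang.Long\" strategy=\"PRIMITIVE\" column=\"key\"/>\n" +
            "    <valuePersistence class=\"java.lang.String\" strategy=\"PRIMITIVE\" column=\"value\"/>\n" +
            "</persistence>";

        KeyValuePersistenceSettings settings = new KeyValuePersistenceSettings(xml);

        // DDL derived from the descriptor by the removed helpers.
        System.out.println(settings.getKeyspaceDDLStatement());
        System.out.println(settings.getTableDDLStatement(settings.getTable()));
    }
}
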
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
deleted file mode 100644
index 59e066784ee94..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceController.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.Row;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
-import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
-
-/**
- * Intermediate layer between persistent store (Cassandra) and Ignite cache key/value classes.
- * Handles all the mappings of Java classes to/from Cassandra and is responsible for all the details
- * of how Java objects should be written/loaded to/from Cassandra.
- */
-public class PersistenceController {
-    /** Ignite cache key/value persistence settings. */
-    private final KeyValuePersistenceSettings persistenceSettings;
-
-    /** List of key unique POJO fields (skipping aliases pointing to the same Cassandra table column). */
-    private final List<? extends PojoField> keyUniquePojoFields;
-
-    /** List of value unique POJO fields (skipping aliases pointing to the same Cassandra table column). */
-    private final List<? extends PojoField> valUniquePojoFields;
-
-    /** CQL statement template to insert row into Cassandra table. */
-    private final String writeStatementTempl;
-
-    /** CQL statement template to delete row from Cassandra table. */
-    private final String delStatementTempl;
-
-    /** CQL statement template to select value fields from Cassandra table. */
-    private final String loadStatementTempl;
-
-    /** CQL statement template to select key/value fields from Cassandra table. */
-    private final String loadWithKeyFieldsStatementTempl;
-
-    /** CQL statements to insert row into Cassandra table. */
-    private volatile Map<String, String> writeStatements = new HashMap<>();
-
-    /** CQL statements to delete row from Cassandra table. */
-    private volatile Map<String, String> delStatements = new HashMap<>();
-
-    /** CQL statements to select value fields from Cassandra table. */
-    private volatile Map<String, String> loadStatements = new HashMap<>();
-
-    /** CQL statements to select key/value fields from Cassandra table. */
-    private volatile Map<String, String> loadWithKeyFieldsStatements = new HashMap<>();
-
-    /**
-     * Constructs persistence controller from Ignite cache persistence settings.
-     *
-     * @param settings persistence settings.
-     */
-    public PersistenceController(KeyValuePersistenceSettings settings) {
-        if (settings == null)
-            throw new IllegalArgumentException("Persistent settings can't be null");
-
-        persistenceSettings = settings;
-
-        String[] loadStatements = prepareLoadStatements();
-
-        loadWithKeyFieldsStatementTempl = loadStatements[0];
-        loadStatementTempl = loadStatements[1];
-        writeStatementTempl = prepareWriteStatement();
-        delStatementTempl = prepareDeleteStatement();
-
-        keyUniquePojoFields = settings.getKeyPersistenceSettings().cassandraUniqueFields();
-
-        List<? extends PojoField> _valUniquePojoFields = settings.getValuePersistenceSettings().cassandraUniqueFields();
-
-        if (_valUniquePojoFields == null || _valUniquePojoFields.isEmpty()) {
-            valUniquePojoFields = _valUniquePojoFields;
-
-            return;
-        }
-
-        List<String> keyColumns = new LinkedList<>();
-
-        if (keyUniquePojoFields == null)
-            keyColumns.add(settings.getKeyPersistenceSettings().getColumn());
-        else {
-            for (PojoField field : keyUniquePojoFields)
-                keyColumns.add(field.getColumn());
-        }
-
-        List<PojoField> fields = new LinkedList<>(_valUniquePojoFields);
-
-        for (String column : keyColumns) {
-            for (int i = 0; i < fields.size(); i++) {
-                if (column.equals(fields.get(i).getColumn())) {
-                    fields.remove(i);
-                    break;
-                }
-            }
-        }
-
-        valUniquePojoFields = fields.isEmpty() ? null : Collections.unmodifiableList(fields);
-    }
-
-    /**
-     * Returns Ignite cache persistence settings.
-     *
-     * @return persistence settings.
-     */
-    public KeyValuePersistenceSettings getPersistenceSettings() {
-        return persistenceSettings;
-    }
-
-    /**
-     * Returns CQL statement to insert row into Cassandra table.
-     *
-     * @param table Table name.
-     * @return CQL statement.
-     */
-    public String getWriteStatement(String table) {
-        return getStatement(table, writeStatementTempl, writeStatements);
-    }
-
-    /**
-     * Returns CQL statement to delete row from Cassandra table.
-     *
-     * @param table Table name.
-     * @return CQL statement.
-     */
-    public String getDeleteStatement(String table) {
-        return getStatement(table, delStatementTempl, delStatements);
-    }
-
-    /**
-     * Returns CQL statement to select key/value fields from Cassandra table.
-     *
-     * @param table Table name.
-     * @param includeKeyFields whether to include/exclude key fields from the returned row.
-     *
-     * @return CQL statement.
-     */
-    public String getLoadStatement(String table, boolean includeKeyFields) {
-        return includeKeyFields ?
-            getStatement(table, loadWithKeyFieldsStatementTempl, loadWithKeyFieldsStatements) :
-            getStatement(table, loadStatementTempl, loadStatements);
-    }
-
-    /**
-     * Binds Ignite cache key object to {@link PreparedStatement}.
-     *
-     * @param statement statement to which key object should be bound.
-     * @param key key object.
-     *
-     * @return statement with bound key.
-     */
-    public BoundStatement bindKey(PreparedStatement statement, Object key) {
-        PersistenceSettings settings = persistenceSettings.getKeyPersistenceSettings();
-
-        Object[] values = PersistenceStrategy.POJO != settings.getStrategy() ?
-            new Object[1] : new Object[keyUniquePojoFields.size()];
-
-        bindValues(settings.getStrategy(), settings.getSerializer(), keyUniquePojoFields, key, values, 0);
-
-        return statement.bind(values);
-    }
-
-    /**
-     * Binds Ignite cache key and value object to {@link com.datastax.driver.core.PreparedStatement}.
-     *
-     * @param statement statement to which key and value object should be bound.
-     * @param key key object.
-     * @param val value object.
-     *
-     * @return statement with bound key and value.
-     */
-    public BoundStatement bindKeyValue(PreparedStatement statement, Object key, Object val) {
-        Object[] values = new Object[persistenceSettings.getTableColumns().size()];
-
-        PersistenceSettings keySettings = persistenceSettings.getKeyPersistenceSettings();
-        PersistenceSettings valSettings = persistenceSettings.getValuePersistenceSettings();
-
-        int offset = bindValues(keySettings.getStrategy(), keySettings.getSerializer(), keyUniquePojoFields, key, values, 0);
-        bindValues(valSettings.getStrategy(), valSettings.getSerializer(), valUniquePojoFields, val, values, offset);
-
-        return statement.bind(values);
-    }
-
-    /**
-     * Builds Ignite cache key object from returned Cassandra table row.
-     *
-     * @param row Cassandra table row.
-     *
-     * @return key object.
-     */
-    public Object buildKeyObject(Row row) {
-        return buildObject(row, persistenceSettings.getKeyPersistenceSettings());
-    }
-
-    /**
-     * Builds Ignite cache value object from Cassandra table row.
-     *
-     * @param row Cassandra table row.
-     *
-     * @return value object.
-     */
-    public Object buildValueObject(Row row) {
-        return buildObject(row, persistenceSettings.getValuePersistenceSettings());
-    }
-
-    /**
-     * Service method to prepare CQL write statement.
-     *
-     * @return CQL write statement.
-     */
-    private String prepareWriteStatement() {
-        Collection<String> cols = persistenceSettings.getTableColumns();
-
-        StringBuilder colsList = new StringBuilder();
-        StringBuilder questionsList = new StringBuilder();
-
-        for (String column : cols) {
-            if (colsList.length() != 0) {
-                colsList.append(", ");
-                questionsList.append(",");
-            }
-
-            colsList.append("\"").append(column).append("\"");
-            questionsList.append("?");
-        }
-
-        String statement = "insert into \"" + persistenceSettings.getKeyspace() + "\".\"%1$s" +
-            "\" (" + colsList + ") values (" + questionsList + ")";
-
-        if (persistenceSettings.getTTL() != null)
-            statement += " using ttl " + persistenceSettings.getTTL();
-
-        return statement + ";";
-    }
-
-    /**
-     * Service method to prepare CQL delete statement.
-     *
-     * @return CQL delete statement.
-     */
-    private String prepareDeleteStatement() {
-        Collection<String> cols = persistenceSettings.getKeyPersistenceSettings().getTableColumns();
-
-        StringBuilder statement = new StringBuilder();
-
-        for (String column : cols) {
-            if (statement.length() != 0)
-                statement.append(" and ");
-
-            statement.append("\"").append(column).append("\"=?");
-        }
-
-        statement.append(";");
-
-        return "delete from \"" + persistenceSettings.getKeyspace() + "\".\"%1$s\" where " + statement;
-    }
-
-    /**
-     * Service method to prepare CQL load statements including and excluding key columns.
-     *
-     * @return array having two CQL statements (including and excluding key columns).
-     */
-    private String[] prepareLoadStatements() {
-        PersistenceSettings settings = persistenceSettings.getKeyPersistenceSettings();
-        boolean pojoStrategy = PersistenceStrategy.POJO == settings.getStrategy();
-        Collection<String> keyCols = settings.getTableColumns();
-        StringBuilder hdrWithKeyFields = new StringBuilder();
-
-        for (String column : keyCols) {
-            // omit calculated fields in load statement
-            if (pojoStrategy && settings.getFieldByColumn(column).calculatedField())
-                continue;
-
-            if (hdrWithKeyFields.length() > 0)
-                hdrWithKeyFields.append(", ");
-
-            hdrWithKeyFields.append("\"").append(column).append("\"");
-        }
-
-        settings = persistenceSettings.getValuePersistenceSettings();
-        pojoStrategy = PersistenceStrategy.POJO == settings.getStrategy();
-        Collection<String> valCols = settings.getTableColumns();
-        StringBuilder hdr = new StringBuilder();
-
-        for (String column : valCols) {
-            // omit calculated fields in load statement
-            if (pojoStrategy && settings.getFieldByColumn(column).calculatedField())
-                continue;
-
-            if (hdr.length() > 0)
-                hdr.append(", ");
-
-            hdr.append("\"").append(column).append("\"");
-
-            if (!keyCols.contains(column))
-                hdrWithKeyFields.append(", \"").append(column).append("\"");
-        }
-
-        hdrWithKeyFields.insert(0, "select ");
-        hdr.insert(0, "select ");
-
-        StringBuilder statement = new StringBuilder();
-
-        statement.append(" from \"");
-        statement.append(persistenceSettings.getKeyspace());
-        statement.append("\".\"%1$s");
-        statement.append("\" where ");
-
-        int i = 0;
-
-        for (String column : keyCols) {
-            if (i > 0)
-                statement.append(" and ");
-
-            statement.append("\"").append(column).append("\"=?");
-            i++;
-        }
-
-        statement.append(";");
-
-        return new String[] {hdrWithKeyFields + statement.toString(), hdr + statement.toString()};
-    }
-
-    /**
-     * @param table Table.
-     * @param template Template.
-     * @param statements Statements.
-     * @return Statement.
-     */
-    private String getStatement(final String table, final String template, final Map<String, String> statements) {
-        //noinspection SynchronizationOnLocalVariableOrMethodParameter
-        synchronized (statements) {
-            String st = statements.get(table);
-
-            if (st == null) {
-                st = String.format(template, table);
-                statements.put(table, st);
-            }
-
-            return st;
-        }
-    }
-
-    /**
-     * Builds object from Cassandra table row.
-     *
-     * @param row Cassandra table row.
-     * @param settings persistence settings to use.
-     *
-     * @return object.
-     */
-    private Object buildObject(Row row, PersistenceSettings settings) {
-        if (row == null)
-            return null;
-
-        PersistenceStrategy stg = settings.getStrategy();
-
-        Class clazz = settings.getJavaClass();
-        String col = settings.getColumn();
-
-        if (PersistenceStrategy.PRIMITIVE == stg)
-            return PropertyMappingHelper.getCassandraColumnValue(row, col, clazz, null);
-
-        if (PersistenceStrategy.BLOB == stg)
-            return settings.getSerializer().deserialize(row.getBytes(col));
-
-        List<PojoField> fields = settings.getFields();
-
-        Object obj;
-
-        try {
-            obj = clazz.newInstance();
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to instantiate object of type '" + clazz.getName() + "' using reflection", e);
-        }
-
-        for (PojoField field : fields) {
-            if (!field.calculatedField())
-                field.setValueFromRow(row, obj, settings.getSerializer());
-        }
-
-        return obj;
-    }
-
-    /**
-     * Extracts field values from POJO object, converts into Java types
-     * which could be mapped to Cassandra types and stores them inside provided values
-     * array starting from specified offset.
-     *
-     * @param stgy Persistence strategy to use.
-     * @param serializer Serializer to use for BLOBs.
-     * @param fields Fields whose values should be extracted.
-     * @param obj Object instance whose field values should be extracted.
-     * @param values Array to store values.
-     * @param offset Offset starting from which to store fields values in the provided values array.
-     *
-     * @return next offset
-     */
-    private int bindValues(PersistenceStrategy stgy, Serializer serializer, List<? extends PojoField> fields, Object obj,
-                            Object[] values, int offset) {
-        if (PersistenceStrategy.PRIMITIVE == stgy) {
-            if (PropertyMappingHelper.getCassandraType(obj.getClass()) == null ||
-                obj.getClass().equals(ByteBuffer.class) || obj instanceof byte[]) {
-                throw new IllegalArgumentException("Couldn't deserialize instance of class '" +
-                    obj.getClass().getName() + "' using PRIMITIVE strategy. Please use BLOB strategy for this case.");
-            }
-
-            values[offset] = obj;
-
-            return ++offset;
-        }
-
-        if (PersistenceStrategy.BLOB == stgy) {
-            values[offset] = serializer.serialize(obj);
-
-            return ++offset;
-        }
-
-        if (fields == null || fields.isEmpty())
-            return offset;
-
-        for (PojoField field : fields) {
-            Object val = field.getValueFromObject(obj, serializer);
-
-            if (val instanceof byte[])
-                val = ByteBuffer.wrap((byte[])val);
-
-            values[offset] = val;
-
-            offset++;
-        }
-
-        return offset;
-    }
-}
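
For a hypothetical single key column "key" and single value column "value" in keyspace "test_ks" (names invented for illustration), the templates prepared above expand to roughly the following CQL once the table name is substituted in:

insert into "test_ks"."my_table" ("key", "value") values (?,?);
delete from "test_ks"."my_table" where "key"=?;
select "value" from "test_ks"."my_table" where "key"=?;
select "key", "value" from "test_ks"."my_table" where "key"=?;

The optional " using ttl N" suffix is appended to the insert template only when a TTL is configured in the persistence settings.
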
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
deleted file mode 100644
index 6a0d703a30a39..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceSettings.java
+++ /dev/null
@@ -1,557 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.beans.PropertyDescriptor;
-import java.io.IOException;
-import java.io.Serializable;
-import java.lang.reflect.Field;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-import com.datastax.driver.core.DataType;
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;
-import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
-import org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer;
-import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
-import org.w3c.dom.Element;
-import org.w3c.dom.NodeList;
-
-/**
- * Stores persistence settings which describe how a particular key/value
- * from Ignite cache should be stored in Cassandra.
- */
-public abstract class PersistenceSettings<F extends PojoField> implements Serializable {
-    /** Xml attribute specifying persistence strategy. */
-    private static final String STRATEGY_ATTR = "strategy";
-
-    /** Xml attribute specifying Cassandra column name. */
-    private static final String COLUMN_ATTR = "column";
-
-    /** Xml attribute specifying BLOB serializer to use. */
-    private static final String SERIALIZER_ATTR = "serializer";
-
-    /** Xml attribute specifying java class of the object to be persisted. */
-    private static final String CLASS_ATTR = "class";
-
-    /** Persistence strategy to use. */
-    private PersistenceStrategy stgy;
-
-    /** Java class of the object to be persisted. */
-    private Class javaCls;
-
-    /** Cassandra table column name where object should be persisted in
-     *  case of using BLOB or PRIMITIVE persistence strategy. */
-    private String col;
-
-    /** Serializer for BLOBs. */
-    private Serializer serializer = new JavaSerializer();
-
-    /** List of Cassandra table columns */
-    private List<String> tableColumns;
-
-    /**
-     * List of POJO fields having unique mapping to Cassandra columns - skipping aliases pointing
-     *  to the same Cassandra table column.
-     */
-    private List<F> casUniqueFields;
-
-    /**
-     * Extracts property descriptor from the descriptors list by its name.
-     *
-     * @param descriptors descriptors list.
-     * @param propName property name.
-     *
-     * @return property descriptor.
-     */
-    public static PropertyDescriptor findPropertyDescriptor(List<PropertyDescriptor> descriptors, String propName) {
-        if (descriptors == null || descriptors.isEmpty() || propName == null || propName.trim().isEmpty())
-            return null;
-
-        for (PropertyDescriptor descriptor : descriptors) {
-            if (descriptor.getName().equals(propName))
-                return descriptor;
-        }
-
-        return null;
-    }
-
-    /**
-     * Constructs persistence settings from corresponding XML element.
-     *
-     * @param el xml element containing persistence settings configuration.
-     */
-    @SuppressWarnings("unchecked")
-    public PersistenceSettings(Element el) {
-        if (el == null)
-            throw new IllegalArgumentException("DOM element representing key/value persistence object can't be null");
-
-        if (!el.hasAttribute(STRATEGY_ATTR)) {
-            throw new IllegalArgumentException("DOM element representing key/value persistence object should have '" +
-                STRATEGY_ATTR + "' attribute");
-        }
-
-        try {
-            stgy = PersistenceStrategy.valueOf(el.getAttribute(STRATEGY_ATTR).trim().toUpperCase());
-        }
-        catch (IllegalArgumentException ignored) {
-            throw new IllegalArgumentException("Incorrect persistence strategy specified: " + el.getAttribute(STRATEGY_ATTR));
-        }
-
-        if (!el.hasAttribute(CLASS_ATTR) && PersistenceStrategy.BLOB != stgy) {
-            throw new IllegalArgumentException("DOM element representing key/value persistence object should have '" +
-                CLASS_ATTR + "' attribute or have BLOB persistence strategy");
-        }
-
-        try {
-            javaCls = el.hasAttribute(CLASS_ATTR) ? getClassInstance(el.getAttribute(CLASS_ATTR).trim()) : null;
-        }
-        catch (Throwable e) {
-            throw new IllegalArgumentException("Incorrect java class specified '" + el.getAttribute(CLASS_ATTR) + "' " +
-                "for Cassandra persistence", e);
-        }
-
-        if (PersistenceStrategy.BLOB != stgy &&
-            (ByteBuffer.class.equals(javaCls) || byte[].class.equals(javaCls))) {
-            throw new IllegalArgumentException("Java class '" + el.getAttribute(CLASS_ATTR) + "' " +
-                "specified could only be persisted using BLOB persistence strategy");
-        }
-
-        if (PersistenceStrategy.PRIMITIVE == stgy &&
-            PropertyMappingHelper.getCassandraType(javaCls) == null) {
-            throw new IllegalArgumentException("Current implementation doesn't support persisting '" +
-                javaCls.getName() + "' object using PRIMITIVE strategy");
-        }
-
-        if (PersistenceStrategy.POJO == stgy) {
-            if (javaCls == null)
-                throw new IllegalStateException("Object java class should be specified for POJO persistence strategy");
-
-            try {
-                javaCls.getConstructor();
-            }
-            catch (Throwable e) {
-                throw new IllegalArgumentException("Java class '" + javaCls.getName() + "' couldn't be used as POJO " +
-                    "cause it doesn't have no arguments constructor", e);
-            }
-        }
-
-        if (el.hasAttribute(COLUMN_ATTR)) {
-            if (PersistenceStrategy.BLOB != stgy && PersistenceStrategy.PRIMITIVE != stgy) {
-                throw new IllegalArgumentException("Incorrect configuration of Cassandra key/value persistence settings, " +
-                    "'" + COLUMN_ATTR + "' attribute is only applicable for PRIMITIVE or BLOB strategy");
-            }
-
-            col = el.getAttribute(COLUMN_ATTR).trim();
-        }
-
-        if (el.hasAttribute(SERIALIZER_ATTR)) {
-            if (PersistenceStrategy.BLOB != stgy && PersistenceStrategy.POJO != stgy) {
-                throw new IllegalArgumentException("Incorrect configuration of Cassandra key/value persistence settings, " +
-                    "'" + SERIALIZER_ATTR + "' attribute is only applicable for BLOB and POJO strategies");
-            }
-
-            Object obj = newObjectInstance(el.getAttribute(SERIALIZER_ATTR).trim());
-
-            if (!(obj instanceof Serializer)) {
-                throw new IllegalArgumentException("Incorrect configuration of Cassandra key/value persistence settings, " +
-                    "serializer class '" + el.getAttribute(SERIALIZER_ATTR) + "' doesn't implement '" +
-                    Serializer.class.getName() + "' interface");
-            }
-
-            serializer = (Serializer)obj;
-        }
-
-        if ((PersistenceStrategy.BLOB == stgy || PersistenceStrategy.PRIMITIVE == stgy) && col == null)
-            col = defaultColumnName();
-    }
-
-    /**
-     * Returns java class of the object to be persisted.
-     *
-     * @return java class.
-     */
-    public Class getJavaClass() {
-        return javaCls;
-    }
-
-    /**
-     * Returns persistence strategy to use.
-     *
-     * @return persistence strategy.
-     */
-    public PersistenceStrategy getStrategy() {
-        return stgy;
-    }
-
-    /**
-     * Returns Cassandra table column name where object should be persisted in
-     * case of using BLOB or PRIMITIVE persistence strategy.
-     *
-     * @return column name.
-     */
-    public String getColumn() {
-        return col;
-    }
-
-    /**
-     * Returns serializer to be used for BLOBs.
-     *
-     * @return serializer.
-     */
-    public Serializer getSerializer() {
-        return serializer;
-    }
-
-    /**
-     * Returns a list of POJO fields to be persisted.
-     *
-     * @return list of fields.
-     */
-    public abstract List<F> getFields();
-
-    /**
-     * Returns POJO field by Cassandra table column name.
-     *
-     * @param column column name.
-     *
-     * @return POJO field or null if not exists.
-     */
-    public PojoField getFieldByColumn(String column) {
-        List<F> fields = getFields();
-
-        if (fields == null || fields.isEmpty())
-            return null;
-
-        for (PojoField field : fields) {
-            if (field.getColumn().equals(column))
-                return field;
-        }
-
-        return null;
-    }
-
-    /**
-     * List of POJO fields having unique mapping to Cassandra columns - skipping aliases pointing
-     * to the same Cassandra table column.
-     *
-     * @return List of fields.
-     */
-    public List<F> cassandraUniqueFields() {
-        return casUniqueFields;
-    }
-
-    /**
-     * Returns list of database column names used to persist field values.
-     *
-     * @return List of database column names.
-     */
-    public List<String> getTableColumns() {
-        return tableColumns;
-    }
-
-    /**
-     * Returns Cassandra table columns DDL, corresponding to POJO fields which should be persisted.
-     *
-     * @return DDL statement for Cassandra table fields.
-     */
-    public String getTableColumnsDDL() {
-        return getTableColumnsDDL(null);
-    }
-
-    /**
-     * Returns Cassandra table columns DDL, corresponding to POJO fields which should be persisted.
-     *
-     * @param ignoreColumns Table columns to ignore (exclude) from DDL.
-     * @return DDL statement for Cassandra table fields.
-     */
-    public String getTableColumnsDDL(Set<String> ignoreColumns) {
-        if (PersistenceStrategy.BLOB == stgy)
-            return "  \"" + col + "\" " + DataType.Name.BLOB.toString();
-
-        if (PersistenceStrategy.PRIMITIVE == stgy)
-            return "  \"" + col + "\" " + PropertyMappingHelper.getCassandraType(javaCls);
-
-        List<F> fields = getFields();
-
-        if (fields == null || fields.isEmpty()) {
-            throw new IllegalStateException("There are no POJO fields found for '" + javaCls.toString()
-                + "' class to be presented as a Cassandra primary key");
-        }
-
-        // Accumulating already processed columns in the set, to prevent duplicating columns
-        // shared by two different POJO fields.
-        Set<String> processedColumns = new HashSet<>();
-
-        StringBuilder builder = new StringBuilder();
-
-        for (F field : fields) {
-            if ((ignoreColumns != null && ignoreColumns.contains(field.getColumn())) ||
-                    processedColumns.contains(field.getColumn())) {
-                continue;
-            }
-
-            if (builder.length() > 0)
-                builder.append(",\n");
-
-            builder.append("  ").append(field.getColumnDDL());
-
-            processedColumns.add(field.getColumn());
-        }
-
-        return builder.toString();
-    }
-
-    /**
-     * Returns default name for Cassandra column (if it's not specified explicitly).
-     *
-     * @return column name
-     */
-    protected abstract String defaultColumnName();
-
-    /**
-     * Creates instance of {@link PojoField} based on its description in XML element.
-     *
-     * @param el XML element describing POJO field
-     * @param clazz POJO java class.
-     */
-    protected abstract F createPojoField(Element el, Class clazz);
-
-    /**
-     * Creates instance of {@link PojoField} from its field accessor.
-     *
-     * @param accessor field accessor.
-     */
-    protected abstract F createPojoField(PojoFieldAccessor accessor);
-
-    /**
-     * Creates instance of {@link PojoField} based on the other instance and java class
-     * to initialize accessor.
-     *
-     * @param field PojoField instance
-     * @param clazz java class
-     */
-    protected abstract F createPojoField(F field, Class clazz);
-
-    /**
-     * Class instance initialization.
-     */
-    protected void init() {
-        if (getColumn() != null && !getColumn().trim().isEmpty()) {
-            tableColumns = new LinkedList<>();
-            tableColumns.add(getColumn());
-            tableColumns = Collections.unmodifiableList(tableColumns);
-
-            return;
-        }
-
-        List<F> fields = getFields();
-
-        if (fields == null || fields.isEmpty())
-            return;
-
-        tableColumns = new LinkedList<>();
-        casUniqueFields = new LinkedList<>();
-
-        for (F field : fields) {
-            if (!tableColumns.contains(field.getColumn())) {
-                tableColumns.add(field.getColumn());
-                casUniqueFields.add(field);
-            }
-        }
-
-        tableColumns = Collections.unmodifiableList(tableColumns);
-        casUniqueFields = Collections.unmodifiableList(casUniqueFields);
-    }
-
-    /**
-     * Checks if there are POJO fields with the same name or the same Cassandra column specified in persistence settings.
-     *
-     * @param fields List of fields to be persisted into Cassandra.
-     */
-    protected void checkDuplicates(List<F> fields) {
-        if (fields == null || fields.isEmpty())
-            return;
-
-        for (PojoField field1 : fields) {
-            boolean sameNames = false;
-            boolean sameCols = false;
-
-            for (PojoField field2 : fields) {
-                if (field1.getName().equals(field2.getName())) {
-                    if (sameNames) {
-                        throw new IllegalArgumentException("Incorrect Cassandra persistence settings, " +
-                            "two POJO fields with the same name '" + field1.getName() + "' specified");
-                    }
-
-                    sameNames = true;
-                }
-
-                if (field1.getColumn().equals(field2.getColumn())) {
-                    if (sameCols && !CassandraHelper.isCassandraCompatibleTypes(field1.getJavaClass(), field2.getJavaClass())) {
-                        throw new IllegalArgumentException("Field '" + field1.getName() + "' shares the same Cassandra table " +
-                                "column '" + field1.getColumn() + "' with field '" + field2.getName() + "', but their Java " +
-                                "classes are different. Fields sharing the same column should have the same " +
-                                "Java class as their type or should be mapped to the same Cassandra primitive type.");
-                    }
-
-                    sameCols = true;
-                }
-            }
-        }
-    }
-
-    /**
-     * Extracts POJO fields from a list of corresponding XML field nodes.
-     *
-     * @param fieldNodes Field nodes to process.
-     * @return POJO fields list.
-     */
-    protected List<F> detectPojoFields(NodeList fieldNodes) {
-        List<F> detectedFields = new LinkedList<>();
-
-        if (fieldNodes != null && fieldNodes.getLength() != 0) {
-            int cnt = fieldNodes.getLength();
-
-            for (int i = 0; i < cnt; i++) {
-                F field = createPojoField((Element)fieldNodes.item(i), getJavaClass());
-
-                // Just checking that such field exists in the class
-                PropertyMappingHelper.getPojoFieldAccessor(getJavaClass(), field.getName());
-
-                detectedFields.add(field);
-            }
-
-            return detectedFields;
-        }
-
-        PropertyDescriptor[] descriptors = PropertyUtils.getPropertyDescriptors(getJavaClass());
-
-        // Collecting Java Beans property descriptors
-        if (descriptors != null) {
-            for (PropertyDescriptor desc : descriptors) {
-                // Skip POJO field if it's read-only
-                if (desc.getWriteMethod() != null) {
-                    Field field = null;
-
-                    try {
-                        field = getJavaClass().getDeclaredField(desc.getName());
-                    }
-                    catch (Throwable ignore) {
-                    }
-
-                    detectedFields.add(createPojoField(new PojoFieldAccessor(desc, field)));
-                }
-            }
-        }
-
-        Field[] fields = getJavaClass().getDeclaredFields();
-
-        // Collecting all fields annotated with @QuerySqlField
-        if (fields != null) {
-            for (Field field : fields) {
-                if (field.getAnnotation(QuerySqlField.class) != null && !PojoField.containsField(detectedFields, field.getName()))
-                    detectedFields.add(createPojoField(new PojoFieldAccessor(field)));
-            }
-        }
-
-        return detectedFields;
-    }
-
-    /**
-     * Instantiates Class object for particular class
-     *
-     * @param clazz class name
-     * @return Class object
-     */
-    private Class getClassInstance(String clazz) {
-        try {
-            return Class.forName(clazz);
-        }
-        catch (ClassNotFoundException ignored) {
-        }
-
-        try {
-            return Class.forName(clazz, true, Thread.currentThread().getContextClassLoader());
-        }
-        catch (ClassNotFoundException ignored) {
-        }
-
-        try {
-            return Class.forName(clazz, true, PersistenceSettings.class.getClassLoader());
-        }
-        catch (ClassNotFoundException ignored) {
-        }
-
-        try {
-            return Class.forName(clazz, true, ClassLoader.getSystemClassLoader());
-        }
-        catch (ClassNotFoundException ignored) {
-        }
-
-        throw new IgniteException("Failed to load class '" + clazz + "' using reflection");
-    }
-
-    /**
-     * Creates new object instance of particular class
-     *
-     * @param clazz class name
-     * @return object
-     */
-    private Object newObjectInstance(String clazz) {
-        try {
-            return getClassInstance(clazz).newInstance();
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to instantiate class '" + clazz + "' using default constructor", e);
-        }
-    }
-
-    /**
-     * @see java.io.Serializable
-     */
-    private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
-        in.defaultReadObject();
-        casUniqueFields = Collections.unmodifiableList(enrichFields(casUniqueFields));
-    }
-
-    /**
-     * Sets accessor for the given {@code src} fields.
-     * Required as accessor is transient and is not present
-     * after deserialization.
-     */
-    protected List<F> enrichFields(List<F> src) {
-        if (src != null) {
-            List<F> enriched = new ArrayList<>();
-
-            for (F sourceField : src)
-                enriched.add(createPojoField(sourceField, getJavaClass()));
-
-            return enriched;
-        }
-        else
-            return new ArrayList<>();
-    }
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java
deleted file mode 100644
index 4b1e2d8274424..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PersistenceStrategy.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-/**
- * Describes persistence strategy to be used to persist object data into Cassandra.
- */
-public enum PersistenceStrategy {
-    /**
-     * Stores object value as is, by mapping its value to Cassandra table column with corresponding type.
-     * <p>
-     * Could be used for primitive java types (like Integer, String, Long, etc.) which can be directly mapped
-     * to appropriate Cassandra types.
-     */
-    PRIMITIVE,
-
-    /**
-     * Stores object value as BLOB, by mapping its value to Cassandra table column with blob type.
-     * Could be used for any java type. Conversion of java object to BLOB is handled by specified serializer.
-     * <p>
-     * Available serializer implementations:
-     * <ul>
-     *     <li>
-     *         org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-     *         serialization framework.
-     *     </li>
-     *     <li>
-     *        org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo serialization
-     *        framework.
-     *     </li>
-     * </ul>
-     */
-    BLOB,
-
-    /**
-     * Stores each field of an object as a column having corresponding type in Cassandra table.
-     * Provides ability to utilize Cassandra secondary indexes for object fields.
-     * <p>
-     * Could be used for objects which follow the JavaBeans convention and have an empty public constructor.
-     * Object fields should be:
-     * <ul>
-     *     <li>Primitive java types like int, long, String, etc.</li>
-     *     <li>Collections of primitive java types like {@code List<Integer>}, {@code Map<Integer, String>}, {@code Set<Long>}</li>
-     * </ul>
-     */
-    POJO
-}
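The constructor in PersistenceSettings above encodes the rules that tie these strategies to a value class: PRIMITIVE needs a type with a direct Cassandra mapping, POJO needs a public no-arg constructor, and BLOB works for anything serializable. A minimal sketch of that decision follows; the helper method itself is hypothetical (not module API) and reuses PropertyMappingHelper from org.apache.ignite.cache.store.cassandra.common:

    /** Suggests which strategy the settings constructor above would accept for a value class (illustrative only). */
    static PersistenceStrategy suggestStrategy(Class<?> cls) {
        // A direct Cassandra type mapping exists - the class can be stored as a single typed column.
        if (PropertyMappingHelper.getCassandraType(cls) != null)
            return PersistenceStrategy.PRIMITIVE;

        try {
            // POJO strategy additionally requires a public no-arg constructor.
            cls.getConstructor();

            return PersistenceStrategy.POJO;
        }
        catch (NoSuchMethodException ignored) {
            // Fall back to storing the serialized form in a single blob column.
            return PersistenceStrategy.BLOB;
        }
    }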
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
deleted file mode 100644
index facd48c215aa5..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoField.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.io.Serializable;
-import java.lang.annotation.Annotation;
-import java.util.List;
-import com.datastax.driver.core.DataType;
-import com.datastax.driver.core.Row;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.apache.ignite.cache.store.cassandra.common.PropertyMappingHelper;
-import org.apache.ignite.cache.store.cassandra.serializer.Serializer;
-import org.w3c.dom.Element;
-
-/**
- * Descriptor for particular field in a POJO object, specifying how this field
- * should be written to or loaded from Cassandra.
- */
-public abstract class PojoField implements Serializable {
-    /** Name attribute of XML element describing Pojo field. */
-    private static final String NAME_ATTR = "name";
-
-    /** Column attribute of XML element describing Pojo field. */
-    private static final String COLUMN_ATTR = "column";
-
-    /** Field name. */
-    private String name;
-
-    /** Field column name in Cassandra table. */
-    private String col;
-
-    /** Field column DDL.  */
-    private String colDDL;
-
-    /** Indicator for calculated field. */
-    private Boolean calculated;
-
-    /** Field property accessor. */
-    private transient PojoFieldAccessor accessor;
-
-    /**
-     *  Checks if list contains POJO field with the specified name.
-     *
-     * @param fields list of POJO fields.
-     * @param fieldName field name.
-     * @return true if list contains field or false otherwise.
-     */
-    public static boolean containsField(List<? extends PojoField> fields, String fieldName) {
-        if (fields == null || fields.isEmpty())
-            return false;
-
-        for (PojoField field : fields) {
-            if (field.getName().equals(fieldName))
-                return true;
-        }
-
-        return false;
-    }
-
-    /**
-     * Creates instance of {@link PojoField} based on its description in XML element.
-     *
-     * @param el XML element describing Pojo field
-     * @param pojoCls Pojo java class.
-     */
-    public PojoField(Element el, Class<?> pojoCls) {
-        if (el == null)
-            throw new IllegalArgumentException("DOM element representing POJO field object can't be null");
-
-        if (!el.hasAttribute(NAME_ATTR)) {
-            throw new IllegalArgumentException("DOM element representing POJO field object should have '"
-                + NAME_ATTR + "' attribute");
-        }
-
-        this.name = el.getAttribute(NAME_ATTR).trim();
-        this.col = el.hasAttribute(COLUMN_ATTR) ? el.getAttribute(COLUMN_ATTR).trim() : name.toLowerCase();
-
-        init(PropertyMappingHelper.getPojoFieldAccessor(pojoCls, name));
-    }
-
-    /**
-     * Creates instance of {@link PojoField} from its field accessor.
-     *
-     * @param accessor field accessor.
-     */
-    public PojoField(PojoFieldAccessor accessor) {
-        this.name = accessor.getName();
-
-        QuerySqlField sqlField = (QuerySqlField)accessor.getAnnotation(QuerySqlField.class);
-
-        col = sqlField != null && sqlField.name() != null && !sqlField.name().isEmpty() ?
-                sqlField.name() : name.toLowerCase();
-
-        init(accessor);
-    }
-
-    /**
-     * Creates instance of {@link PojoField} from the other instance
-     * and java class.
-     *
-     * @param field {@link PojoField} instance to copy from.
-     * @param pojoCls Class of the {@link PojoField} instance.
-     */
-    public PojoField(PojoField field, Class<?> pojoCls) {
-        this.name = field.name;
-        this.col = field.col;
-        this.colDDL = field.colDDL;
-
-        init(PropertyMappingHelper.getPojoFieldAccessor(pojoCls, name));
-    }
-
-    /**
-     * @return field name.
-     */
-    public String getName() {
-        return name;
-    }
-
-    /**
-     * Returns java class of the field.
-     *
-     * @return Java class.
-     */
-    public Class getJavaClass() {
-        return accessor.getFieldType();
-    }
-
-    /**
-     * @return Cassandra table column name.
-     */
-    public String getColumn() {
-        return col;
-    }
-
-    /**
-     * @return Cassandra table column DDL statement.
-     */
-    public String getColumnDDL() {
-        return colDDL;
-    }
-
-    /**
-     * Indicates if it's a calculated field - a field whose value is just generated based on other field values.
-     * Such a field is stored in Cassandra like all other POJO fields, but its value shouldn't be read back from
-     * Cassandra, because it is again just derived from other field values. One good application of such
-     * fields is Cassandra materialized views built on top of other tables.
-     *
-     * @return {@code true} if it's an auto-generated field, {@code false} if not.
-     */
-    public boolean calculatedField() {
-        if (calculated != null)
-            return calculated;
-
-        return calculated = accessor.isReadOnly();
-    }
-
-    /**
-     * Gets field value as an object having a Cassandra compatible type.
-     * Thus it can be stored directly into Cassandra without any conversions.
-     *
-     * @param obj Object instance.
-     * @param serializer {@link org.apache.ignite.cache.store.cassandra.serializer.Serializer} to use.
-     * @return Object to store in Cassandra table column.
-     */
-    public Object getValueFromObject(Object obj, Serializer serializer) {
-        Object val = accessor.getValue(obj);
-
-        if (val == null)
-            return null;
-
-        DataType.Name cassandraType = PropertyMappingHelper.getCassandraType(val.getClass());
-
-        if (cassandraType != null)
-            return val;
-
-        if (serializer == null) {
-            throw new IllegalStateException("Can't serialize value from object '" +
-                val.getClass().getName() + "' field '" + name + "', cause there is no BLOB serializer specified");
-        }
-
-        return serializer.serialize(val);
-    }
-
-    /**
-     * Returns POJO field annotation.
-     *
-     * @param clazz Class of the annotation to get.
-     * @return annotation.
-     */
-    public Annotation getAnnotation(Class clazz) {
-        return accessor.getAnnotation(clazz);
-    }
-
-    /**
-     * Sets object field value from a {@link com.datastax.driver.core.Row} returned by Cassandra CQL statement.
-     *
-     * @param row {@link com.datastax.driver.core.Row}
-     * @param obj object whose field should be populated from {@link com.datastax.driver.core.Row}
-     * @param serializer {@link org.apache.ignite.cache.store.cassandra.serializer.Serializer} to use.
-     */
-    public void setValueFromRow(Row row, Object obj, Serializer serializer) {
-        if (calculatedField())
-            return;
-
-        Object val = PropertyMappingHelper.getCassandraColumnValue(row, col, accessor.getFieldType(), serializer);
-
-        accessor.setValue(obj, val);
-    }
-
-    /**
-     * Initializes field info from the field accessor.
-     *
-     * @param accessor {@link PojoFieldAccessor} accessor.
-     */
-    private void init(PojoFieldAccessor accessor) {
-        DataType.Name cassandraType = PropertyMappingHelper.getCassandraType(accessor.getFieldType());
-        cassandraType = cassandraType == null ? DataType.Name.BLOB : cassandraType;
-
-        this.colDDL = "\"" + col + "\" " + cassandraType.toString();
-
-        this.accessor = accessor;
-    }
-}
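The calculated-field behaviour described above is easiest to see on a concrete class. In the hypothetical bean below, fullName has a getter but no setter, so its accessor is read-only, calculatedField() returns true and setValueFromRow() skips it. Note that auto-detection in PersistenceSettings skips read-only bean properties, so such a field only takes part when it is listed explicitly in the XML persistence descriptor:

    /** Hypothetical value class illustrating a calculated (read-only) field. */
    public class Person {
        /** First name, regular read/write property. */
        private String firstName;

        /** Last name, regular read/write property. */
        private String lastName;

        public String getFirstName() { return firstName; }
        public void setFirstName(String firstName) { this.firstName = firstName; }

        public String getLastName() { return lastName; }
        public void setLastName(String lastName) { this.lastName = lastName; }

        /** Derived value: written to its Cassandra column, but never read back into the object. */
        public String getFullName() { return firstName + " " + lastName; }
    }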
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoFieldAccessor.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoFieldAccessor.java
deleted file mode 100644
index c8ff3e54d0dbb..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoFieldAccessor.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.beans.PropertyDescriptor;
-import java.lang.annotation.Annotation;
-import java.lang.reflect.Field;
-import org.apache.ignite.IgniteException;
-
-/**
- * Property accessor provides read/write access to POJO object properties defined through:
- *  1) Getter/setter methods
- *  2) Raw class members
- */
-public class PojoFieldAccessor {
-    /** Java Bean property descriptor */
-    private PropertyDescriptor desc;
-
-    /** Object field associated with property descriptor. Used just to get annotations which are
-     * applied not to the property descriptor, but directly to the object field associated with the property. */
-    private Field descField;
-
-    /** Object field */
-    private Field field;
-
-    /**
-     * Constructs object instance from Java Bean property descriptor, providing access to getter/setter.
-     *
-     * @param desc Java Bean property descriptor.
-     * @param field object field associated with property descriptor.
-     */
-    public PojoFieldAccessor(PropertyDescriptor desc, Field field) {
-        if (desc.getReadMethod() == null) {
-            throw new IllegalArgumentException("Field '" + desc.getName() +
-                    "' of the class instance '" + desc.getPropertyType().getName() +
-                    "' doesn't provide getter method");
-        }
-
-        desc.getReadMethod().setAccessible(true);
-
-        if (desc.getWriteMethod() != null)
-            desc.getWriteMethod().setAccessible(true);
-
-        this.desc = desc;
-        this.descField = field;
-    }
-
-    /**
-     * Constructs object instance from Field, providing direct access to class member.
-     *
-     * @param field Field descriptor.
-     */
-    public PojoFieldAccessor(Field field) {
-        field.setAccessible(true);
-        this.field = field;
-    }
-
-    /**
-     * Returns POJO field name.
-     *
-     * @return field name.
-     */
-    public String getName() {
-        return desc != null ? desc.getName() : field.getName();
-    }
-
-    /**
-     * Indicates if it's read-only field.
-     *
-     * @return true if the field is read-only, false if not.
-     */
-    public boolean isReadOnly() {
-        return desc != null && desc.getWriteMethod() == null;
-    }
-
-    /**
-     * Returns POJO field annotation.
-     *
-     * @param clazz Class of the annotation to get.
-     * @return annotation.
-     */
-    public Annotation getAnnotation(Class clazz) {
-        if (field != null)
-            return field.getAnnotation(clazz);
-
-        Annotation ann = desc.getReadMethod().getAnnotation(clazz);
-
-        if (ann != null)
-            return ann;
-
-        ann = desc.getWriteMethod() == null ? null : desc.getWriteMethod().getAnnotation(clazz);
-
-        if (ann != null)
-            return ann;
-
-        return descField == null ? null : descField.getAnnotation(clazz);
-    }
-
-    /**
-     * Returns field value for the object instance.
-     *
-     * @param obj object instance.
-     * @return field value.
-     */
-    public Object getValue(Object obj) {
-        try {
-            return desc != null ? desc.getReadMethod().invoke(obj) : field.get(obj);
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to get value of the field '" + getName() + "' from the instance " +
-                    " of '" + obj.getClass().toString() + "' class", e);
-        }
-    }
-
-    /**
-     * Assigns value for the object field.
-     *
-     * @param obj object instance.
-     * @param val value to assign.
-     */
-    public void setValue(Object obj, Object val) {
-        if (isReadOnly())
-            throw new IgniteException("Can't assign value to read-only field '" + getName() + "' of the instance " +
-                    " of '" + obj.getClass().toString() + "' class");
-
-        try {
-            if (desc != null)
-                desc.getWriteMethod().invoke(obj, val);
-            else
-                field.set(obj, val);
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to set value of the field '" + getName() + "' of the instance " +
-                    " of '" + obj.getClass().toString() + "' class", e);
-        }
-    }
-
-    /**
-     * Returns field type.
-     *
-     * @return field type.
-     */
-    public Class getFieldType() {
-        return desc != null ? desc.getPropertyType() : field.getType();
-    }
-}
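A short sketch of the two access modes listed above, reusing the hypothetical Person bean from the previous sketch (firstName exposed through a getter/setter pair, lastName accessed as a raw member). This is illustrative only and assumes both classes are on the classpath:

    import java.beans.Introspector;
    import java.beans.PropertyDescriptor;
    import org.apache.ignite.cache.store.cassandra.persistence.PojoFieldAccessor;

    /** Demonstrates both PojoFieldAccessor construction modes on the hypothetical Person bean. */
    public class AccessorSketch {
        public static void main(String[] args) throws Exception {
            Person p = new Person();

            PropertyDescriptor firstNameDesc = null;

            for (PropertyDescriptor desc : Introspector.getBeanInfo(Person.class).getPropertyDescriptors()) {
                if ("firstName".equals(desc.getName()))
                    firstNameDesc = desc;
            }

            // Access through the Java Beans property descriptor (getter/setter pair).
            PojoFieldAccessor byProperty = new PojoFieldAccessor(firstNameDesc, Person.class.getDeclaredField("firstName"));
            byProperty.setValue(p, "Alice");

            // Direct access to the raw class member.
            PojoFieldAccessor byField = new PojoFieldAccessor(Person.class.getDeclaredField("lastName"));
            byField.setValue(p, "Smith");

            // Prints: Alice Smith
            System.out.println(byProperty.getValue(p) + " " + byField.getValue(p));
        }
    }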
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
deleted file mode 100644
index 2b02fe5c4c66e..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoKeyField.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.w3c.dom.Element;
-
-/**
- * Descriptor for Ignite key POJO class
- */
-public class PojoKeyField extends PojoField {
-    /**
-     * Specifies sort order for POJO key field
-     */
-    public enum SortOrder {
-        /** Ascending sort order. */
-        ASC,
-        /** Descending sort order. */
-        DESC
-    }
-
-    /** Xml attribute specifying sort order. */
-    private static final String SORT_ATTR = "sort";
-
-    /** Sort order. */
-    private SortOrder sortOrder;
-
-    /**
-     * Constructs Ignite cache key POJO object descriptor.
-     *
-     * @param el xml configuration element.
-     * @param pojoCls java class of key POJO field.
-     */
-    public PojoKeyField(Element el, Class pojoCls) {
-        super(el, pojoCls);
-
-        if (el.hasAttribute(SORT_ATTR)) {
-            try {
-                sortOrder = SortOrder.valueOf(el.getAttribute(SORT_ATTR).trim().toUpperCase());
-            }
-            catch (IllegalArgumentException ignored) {
-                throw new IllegalArgumentException("Incorrect sort order '" + el.getAttribute(SORT_ATTR) + "' specified");
-            }
-        }
-    }
-
-    /**
-     * Constructs instance of {@code PojoKeyField} based on the other instance and java class
-     * to initialize accessor.
-     *
-     * @param field PojoKeyField instance
-     * @param pojoCls java class of the corresponding POJO
-     */
-    public PojoKeyField(PojoKeyField field, Class<?> pojoCls) {
-        super(field, pojoCls);
-
-        sortOrder = field.sortOrder;
-    }
-
-    /**
-     * Constructs Ignite cache key POJO object descriptor.
-     *
-     * @param accessor property descriptor.
-     */
-    public PojoKeyField(PojoFieldAccessor accessor) {
-        super(accessor);
-
-        QuerySqlField sqlField = (QuerySqlField)accessor.getAnnotation(QuerySqlField.class);
-
-        if (sqlField != null && sqlField.descending())
-            sortOrder = SortOrder.DESC;
-    }
-
-    /**
-     * Returns sort order for the field.
-     *
-     * @return sort order.
-     */
-    public SortOrder getSortOrder() {
-        return sortOrder;
-    }
-}
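When key fields are detected from annotations rather than XML, the accessor-based constructor above derives the sort order from @QuerySqlField. A hypothetical key class and a quick check of that mapping:

    import org.apache.ignite.cache.query.annotations.QuerySqlField;
    import org.apache.ignite.cache.store.cassandra.persistence.PojoFieldAccessor;
    import org.apache.ignite.cache.store.cassandra.persistence.PojoKeyField;

    /** Hypothetical composite key: 'hiredTs' is mapped to a descending sort order. */
    public class PersonKey {
        /** Plain key field. */
        @QuerySqlField
        private long companyId;

        /** 'descending = true' resolves to SortOrder.DESC in the constructor above. */
        @QuerySqlField(descending = true)
        private long hiredTs;

        public static void main(String[] args) throws Exception {
            PojoKeyField field = new PojoKeyField(new PojoFieldAccessor(PersonKey.class.getDeclaredField("hiredTs")));

            // Prints: DESC
            System.out.println(field.getSortOrder());
        }
    }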
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
deleted file mode 100644
index 0427e6cd77b6f..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/PojoValueField.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.w3c.dom.Element;
-
-/**
- * Descriptor for Ignite value POJO class
- */
-public class PojoValueField extends PojoField {
-    /** Xml attribute specifying that Cassandra column is static. */
-    private static final String STATIC_ATTR = "static";
-
-    /** Xml attribute specifying that secondary index should be created for Cassandra column. */
-    private static final String INDEX_ATTR = "index";
-
-    /** Xml attribute specifying secondary index custom class. */
-    private static final String INDEX_CLASS_ATTR = "indexClass";
-
-    /** Xml attribute specifying secondary index options. */
-    private static final String INDEX_OPTIONS_ATTR = "indexOptions";
-
-    /** Indicates if Cassandra column should be indexed. */
-    private Boolean isIndexed;
-
-    /** Custom java class for Cassandra secondary index. */
-    private String idxCls;
-
-    /** Secondary index options. */
-    private String idxOptions;
-
-    /** Indicates if Cassandra column is static. */
-    private Boolean isStatic;
-
-    /**
-     * Constructs Ignite cache value field descriptor.
-     *
-     * @param el field descriptor xml configuration element.
-     * @param pojoCls field java class
-     */
-    public PojoValueField(Element el, Class pojoCls) {
-        super(el, pojoCls);
-
-        if (el.hasAttribute(STATIC_ATTR))
-            isStatic = Boolean.parseBoolean(el.getAttribute(STATIC_ATTR).trim().toLowerCase());
-
-        if (el.hasAttribute(INDEX_ATTR))
-            isIndexed = Boolean.parseBoolean(el.getAttribute(INDEX_ATTR).trim().toLowerCase());
-
-        if (el.hasAttribute(INDEX_CLASS_ATTR))
-            idxCls = el.getAttribute(INDEX_CLASS_ATTR).trim();
-
-        if (el.hasAttribute(INDEX_OPTIONS_ATTR)) {
-            idxOptions = el.getAttribute(INDEX_OPTIONS_ATTR).trim();
-
-            if (!idxOptions.toLowerCase().startsWith("with")) {
-                idxOptions = idxOptions.toLowerCase().startsWith("options") ?
-                    "with " + idxOptions :
-                    "with options = " + idxOptions;
-            }
-        }
-    }
-
-    /**
-     * Constructs Ignite cache value field descriptor.
-     *
-     * @param accessor field property accessor.
-     */
-    public PojoValueField(PojoFieldAccessor accessor) {
-        super(accessor);
-
-        QuerySqlField sqlField = (QuerySqlField)accessor.getAnnotation(QuerySqlField.class);
-
-        isIndexed = sqlField != null && sqlField.index();
-    }
-
-    /**
-     * Constructs instance of {@code PojoValueField} based on the other instance and java class
-     * to initialize accessor.
-     *
-     * @param field PojoValueField instance
-     * @param pojoCls java class of the corresponding POJO
-     */
-    public PojoValueField(PojoValueField field, Class<?> pojoCls) {
-        super(field, pojoCls);
-
-        isStatic = field.isStatic;
-        isIndexed = field.isIndexed;
-        idxCls = field.idxCls;
-        idxOptions = field.idxOptions;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getColumnDDL() {
-        String colDDL = super.getColumnDDL();
-
-        if (isStatic != null && isStatic)
-            colDDL += " static";
-
-        return colDDL;
-    }
-
-    /**
-     * Indicates if secondary index should be created for the field.
-     *
-     * @return true/false if secondary index should/shouldn't be created for the field.
-     */
-    public boolean isIndexed() {
-        return isIndexed != null && isIndexed;
-    }
-
-    /**
-     * Returns DDL for the field secondary index.
-     *
-     * @param keyspace Cassandra keyspace where index should be created.
-     * @param tbl Cassandra table for which secondary index should be created.
-     *
-     * @return secondary index DDL.
-     */
-    public String getIndexDDL(String keyspace, String tbl) {
-        if (isIndexed == null || !isIndexed)
-            return null;
-
-        StringBuilder builder = new StringBuilder();
-
-        if (idxCls != null)
-            builder.append("create custom index if not exists on \"").append(keyspace).append("\".\"").append(tbl).append("\"");
-        else
-            builder.append("create index if not exists on \"").append(keyspace).append("\".\"").append(tbl).append("\"");
-
-        builder.append(" (\"").append(getColumn()).append("\")");
-
-        if (idxCls != null)
-            builder.append(" using '").append(idxCls).append("'");
-
-        if (idxOptions != null)
-            builder.append(" ").append(idxOptions);
-
-        return builder.append(";").toString();
-    }
-}
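Similarly for value fields, @QuerySqlField(index = true) switches on secondary index generation, and getIndexDDL() then produces the CREATE INDEX statement for a given keyspace and table. A hypothetical sketch (class and keyspace/table names are illustrative):

    import org.apache.ignite.cache.query.annotations.QuerySqlField;
    import org.apache.ignite.cache.store.cassandra.persistence.PojoFieldAccessor;
    import org.apache.ignite.cache.store.cassandra.persistence.PojoValueField;

    /** Hypothetical value class with an indexed column. */
    public class PersonValue {
        /** Indexed field - isIndexed is picked up from the annotation in the constructor above. */
        @QuerySqlField(index = true)
        private String city;

        public static void main(String[] args) throws Exception {
            PojoValueField field = new PojoValueField(new PojoFieldAccessor(PersonValue.class.getDeclaredField("city")));

            // Prints: create index if not exists on "my_keyspace"."person" ("city");
            System.out.println(field.getIndexDDL("my_keyspace", "person"));
        }
    }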
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
deleted file mode 100644
index 5e106af70555e..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/ValuePersistenceSettings.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.w3c.dom.Element;
-import org.w3c.dom.NodeList;
-
-/**
- * Stores persistence settings for Ignite cache value
- */
-public class ValuePersistenceSettings extends PersistenceSettings<PojoValueField> {
-    /** XML element describing value field settings. */
-    private static final String FIELD_ELEMENT = "field";
-
-    /** Value fields. */
-    private List<PojoValueField> fields = new LinkedList<>();
-
-    /**
-     * Creates class instance from XML configuration.
-     *
-     * @param el XML element describing value persistence settings.
-     */
-    public ValuePersistenceSettings(Element el) {
-        super(el);
-
-        if (PersistenceStrategy.POJO != getStrategy()) {
-            init();
-
-            return;
-        }
-
-        NodeList nodes = el.getElementsByTagName(FIELD_ELEMENT);
-
-        fields = detectPojoFields(nodes);
-
-        if (fields.isEmpty())
-            throw new IllegalStateException("Failed to initialize value fields for class '" + getJavaClass().getName() + "'");
-
-        checkDuplicates(fields);
-
-        init();
-    }
-
-    /**
-     * @return List of value fields.
-     */
-    @Override public List<PojoValueField> getFields() {
-        return fields == null ? null : Collections.unmodifiableList(fields);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String defaultColumnName() {
-        return "value";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected PojoValueField createPojoField(Element el, Class clazz) {
-        return new PojoValueField(el, clazz);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected PojoValueField createPojoField(PojoFieldAccessor accessor) {
-        return new PojoValueField(accessor);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected PojoValueField createPojoField(PojoValueField field, Class clazz) {
-        return new PojoValueField(field, clazz);
-    }
-
-    /**
-     * @see java.io.Serializable
-     */
-    private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
-        in.defaultReadObject();
-
-        fields = enrichFields(fields);
-    }
-
-}
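Putting the pieces together, a value persistence descriptor can be parsed straight from an XML fragment. The sketch below assumes the attribute names behind STRATEGY_ATTR and CLASS_ATTR resolve to 'strategy' and 'class' (only the 'field', 'name' and 'column' names are visible above), and that a hypothetical org.example.Person bean with firstName and age properties and a no-arg constructor is on the classpath:

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.apache.ignite.cache.store.cassandra.persistence.ValuePersistenceSettings;
    import org.w3c.dom.Element;

    /** Parses a value persistence descriptor and prints the column DDL it would generate. */
    public class ValueSettingsSketch {
        public static void main(String[] args) throws Exception {
            String xml =
                "<valuePersistence strategy=\"POJO\" class=\"org.example.Person\">" +
                "  <field name=\"firstName\" column=\"first_name\"/>" +
                "  <field name=\"age\"/>" +
                "</valuePersistence>";

            Element el = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)))
                .getDocumentElement();

            ValuePersistenceSettings settings = new ValuePersistenceSettings(el);

            // One column definition per unique field, e.g.:
            //   "first_name" text,
            //   "age" int
            System.out.println(settings.getTableColumnsDDL());
        }
    }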
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java
deleted file mode 100644
index 7dd0840ae00c7..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/persistence/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains persistence settings configuration
- */
-
-package org.apache.ignite.cache.store.cassandra.persistence;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
deleted file mode 100644
index 44d2d47019f26..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/JavaSerializer.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.serializer;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.nio.ByteBuffer;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Serializer based on standard Java serialization.
- */
-public class JavaSerializer implements Serializer {
-    /** */
-    private static final int DFLT_BUFFER_SIZE = 4096;
-
-    /** {@inheritDoc} */
-    @Override public ByteBuffer serialize(Object obj) {
-        if (obj == null)
-            return null;
-
-        ByteArrayOutputStream stream = null;
-        ObjectOutputStream out = null;
-
-        try {
-            stream = new ByteArrayOutputStream(DFLT_BUFFER_SIZE);
-
-            out = new ObjectOutputStream(stream);
-            out.writeObject(obj);
-            out.flush();
-
-            return ByteBuffer.wrap(stream.toByteArray());
-        }
-        catch (IOException e) {
-            throw new IllegalStateException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
-        }
-        finally {
-            U.closeQuiet(out);
-            U.closeQuiet(stream);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object deserialize(ByteBuffer buf) {
-        ByteArrayInputStream stream = null;
-        ObjectInputStream in = null;
-
-        try {
-            stream = new ByteArrayInputStream(buf.array());
-            in = new ObjectInputStream(stream);
-
-            return in.readObject();
-        }
-        catch (Throwable e) {
-            throw new IllegalStateException("Failed to deserialize object from byte stream", e);
-        }
-        finally {
-            U.closeQuiet(in);
-            U.closeQuiet(stream);
-        }
-    }
-}
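A round-trip through this serializer is a one-liner each way; the sketch below just wraps and unwraps an ArrayList (any Serializable value works, and the java.nio/java.util imports are omitted for brevity):

    // Round-trip sketch for JavaSerializer.
    Serializer serializer = new JavaSerializer();

    ByteBuffer buf = serializer.serialize(new ArrayList<>(Arrays.asList(1, 2, 3)));

    @SuppressWarnings("unchecked")
    List<Integer> restored = (List<Integer>)serializer.deserialize(buf);

    // Prints: [1, 2, 3]
    System.out.println(restored);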
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java
deleted file mode 100644
index 5b8d5422b32fb..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/Serializer.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.serializer;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-
-/**
- * Interface which should be implemented by all serializers responsible
- * for writing/loading data to/from Cassandra in binary (BLOB) format.
- */
-public interface Serializer extends Serializable {
-    /**
-     * Serializes object into byte buffer.
-     *
-     * @param obj Object to serialize.
-     * @return Byte buffer with binary data.
-     */
-    public ByteBuffer serialize(Object obj);
-
-    /**
-     * Deserializes object from byte buffer.
-     *
-     * @param buf Byte buffer.
-     * @return Deserialized object.
-     */
-    public Object deserialize(ByteBuffer buf);
-}
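Any custom serializer only needs these two methods. Below is a sketch of a GZIP-compressing variant of the module's JavaSerializer; it is illustrative only, not part of the module, and mirrors JavaSerializer's assumption that the buffer passed to deserialize() is array-backed:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.nio.ByteBuffer;
    import java.util.zip.GZIPInputStream;
    import java.util.zip.GZIPOutputStream;
    import org.apache.ignite.cache.store.cassandra.serializer.Serializer;

    /** Compresses the standard Java serialization form with GZIP before storing it as a BLOB. */
    public class GzipJavaSerializer implements Serializer {
        /** {@inheritDoc} */
        @Override public ByteBuffer serialize(Object obj) {
            if (obj == null)
                return null;

            ByteArrayOutputStream bytes = new ByteArrayOutputStream();

            try (ObjectOutputStream out = new ObjectOutputStream(new GZIPOutputStream(bytes))) {
                out.writeObject(obj);
            }
            catch (IOException e) {
                throw new IllegalStateException("Failed to serialize object of the class '" + obj.getClass().getName() + "'", e);
            }

            // Closing the streams above finishes the GZIP block, so the buffer is complete here.
            return ByteBuffer.wrap(bytes.toByteArray());
        }

        /** {@inheritDoc} */
        @Override public Object deserialize(ByteBuffer buf) {
            try (ObjectInputStream in = new ObjectInputStream(
                new GZIPInputStream(new ByteArrayInputStream(buf.array())))) {
                return in.readObject();
            }
            catch (IOException | ClassNotFoundException e) {
                throw new IllegalStateException("Failed to deserialize object from byte stream", e);
            }
        }
    }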
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
deleted file mode 100644
index aa1bccfa07d7b..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/serializer/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains serializer implementations used to store BLOBs in Cassandra
- */
-
-package org.apache.ignite.cache.store.cassandra.serializer;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
deleted file mode 100644
index 5d971e8779d43..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchExecutionAssistant.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.Row;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-
-/**
- * Provides information for batch operations (loadAll, deleteAll, writeAll) of Ignite cache
- * backed by {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}.
- *
- * @param <R> type of the result returned from batch operation.
- * @param <V> type of the value used in batch operation.
- */
-public interface BatchExecutionAssistant<R, V> {
-    /**
-     * Indicates if Cassandra table existence is required for this batch operation.
-     *
-     * @return {@code true} if table existence is required.
-     */
-    public boolean tableExistenceRequired();
-
-    /**
-     * Cassandra table to use for an operation.
-     *
-     * @return Table name.
-     */
-    public String getTable();
-
-    /**
-     * Returns unbound CQL statement to be executed inside batch operation.
-     *
-     * @return Unbound CQL statement.
-     */
-    public String getStatement();
-
-    /**
-     * Binds prepared statement with the values extracted from the provided object.
-     *
-     * @param statement Statement.
-     * @param obj Parameters for statement binding.
-     * @return Bound statement.
-     */
-    public BoundStatement bindStatement(PreparedStatement statement, V obj);
-
-    /**
-     *  Returns Ignite cache key/value persistence settings.
-     *
-     * @return persistence settings.
-     */
-    public KeyValuePersistenceSettings getPersistenceSettings();
-
-    /**
-     * Display name for the batch operation.
-     *
-     * @return Operation display name.
-     */
-    public String operationName();
-
-    /**
-     * Processes particular row inside batch operation.
-     *
-     * @param row Row to process.
-     * @param seqNum Sequential number of the row.
-     */
-    public void process(Row row, int seqNum);
-
-    /**
-     * Checks if row/object with specified sequential number is already processed.
-     *
-     * @param seqNum object sequential number
-     * @return {@code true} if object is already processed
-     */
-    public boolean alreadyProcessed(int seqNum);
-
-    /**
-     * @return number of processed objects/rows.
-     */
-    public int processedCount();
-
-    /**
-     * @return batch operation result.
-     */
-    public R processedData();
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java
deleted file mode 100644
index 387c98f4bdfdf..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/BatchLoaderAssistant.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Statement;
-
-/**
- * Provides information for loadCache operation of {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}.
- */
-public interface BatchLoaderAssistant {
-    /**
-     * Returns name of the batch load operation.
-     *
-     * @return operation name.
-     */
-    public String operationName();
-
-    /**
-     * Returns CQL statement to use in batch load operation.
-     *
-     * @return CQL statement for batch load operation.
-     */
-    public Statement getStatement();
-
-    /**
-     * Processes each row returned by batch load operation.
-     *
-     * @param row row selected from Cassandra table.
-     */
-    public void process(Row row);
-}
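
As a rough, non-authoritative illustration, a loader assistant that simply collects one column of every returned row could look like the sketch below; the keyspace/table test.my_table and its id column are invented for the example.

    package org.apache.ignite.cache.store.cassandra.session;

    import java.util.ArrayList;
    import java.util.List;
    import com.datastax.driver.core.Row;
    import com.datastax.driver.core.SimpleStatement;
    import com.datastax.driver.core.Statement;

    /** Hypothetical loader assistant collecting the "id" column of every selected row. */
    class CollectIdsAssistant implements BatchLoaderAssistant {
        /** Identifiers collected so far. */
        private final List<Long> ids = new ArrayList<>();

        @Override public String operationName() { return "loadCache"; }

        @Override public Statement getStatement() {
            return new SimpleStatement("select id from test.my_table;");
        }

        @Override public void process(Row row) { ids.add(row.getLong("id")); }

        /** @return Identifiers collected from the processed rows. */
        List<Long> collected() { return ids; }
    }
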
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
deleted file mode 100644
index facfa40a49487..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSession.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import java.io.Closeable;
-import java.util.List;
-import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;
-
-/**
- * Wrapper around Cassandra driver session, to automatically handle:
- * <ul>
- *  <li>Keyspace and table absence exceptions</li>
- *  <li>Timeout exceptions</li>
- *  <li>Batch operations</li>
- * </ul>
- */
-public interface CassandraSession extends Closeable {
-    /**
-     * Executes single synchronous operation against the Cassandra database.
-     *
-     * @param assistant execution assistant that performs the main operation logic.
-     * @param <V> type of the result returned from operation.
-     *
-     * @return result of the operation.
-     */
-    public <V> V execute(ExecutionAssistant<V> assistant);
-
-    /**
-     * Executes asynchronous batch operation against the Cassandra database.
-     *
-     * @param assistant execution assistant that performs the main operation logic.
-     * @param data data which should be processed in the batch operation.
-     * @param <R> type of the result returned from batch operation.
-     * @param <V> type of the value used in batch operation.
-     *
-     * @return result of the operation.
-     */
-    public <R, V> R execute(BatchExecutionAssistant<R, V> assistant, Iterable<? extends V> data);
-
-    /**
-     * Executes asynchronous batch operation to load a bunch of records
-     * specified by a CQL statement from the Cassandra database.
-     *
-     * @param assistant execution assistant that performs the main operation logic.
-     */
-    public void execute(BatchLoaderAssistant assistant);
-
-    /**
-     * Executes all the mutations performed within an Ignite transaction against the Cassandra database.
-     *
-     * @param mutations Mutations.
-     */
-    public void execute(List<Mutation> mutations);
-}
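
Putting the overloads together, a caller runs assistants against a session and closes it afterwards so the underlying driver session can be returned to the pool. The sketch below is a hedged usage example only: it reuses the hypothetical PairWriteAssistant and CollectIdsAssistant sketched above and assumes the session and persistence settings are supplied by the surrounding cache store.

    package org.apache.ignite.cache.store.cassandra.session;

    import java.io.IOException;
    import java.util.List;
    import java.util.Map;
    import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;

    /** Hedged usage sketch of the CassandraSession overloads; all collaborators are assumed to be provided. */
    class SessionUsageSketch {
        void writeThenLoad(CassandraSession ses, KeyValuePersistenceSettings settings,
            List<Map.Entry<Long, String>> pairs) throws IOException {
            try {
                // Asynchronous batch write of many objects (hypothetical assistant sketched earlier).
                ses.execute(new PairWriteAssistant(settings), pairs);

                // Bulk load driven by a single CQL statement (hypothetical assistant sketched earlier).
                CollectIdsAssistant loader = new CollectIdsAssistant();
                ses.execute(loader);
            }
            finally {
                // Returns the underlying driver session to the pool rather than closing it outright.
                ses.close();
            }
        }
    }
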
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
deleted file mode 100644
index 53aa424744171..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java
+++ /dev/null
@@ -1,1030 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantLock;
-import javax.cache.Cache;
-import com.datastax.driver.core.BatchStatement;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.ResultSetFuture;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.exceptions.AlreadyExistsException;
-import com.datastax.driver.core.exceptions.InvalidQueryException;
-import com.datastax.driver.core.querybuilder.Batch;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;
-import org.apache.ignite.cache.store.cassandra.common.RandomSleeper;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.cache.store.cassandra.session.pool.SessionPool;
-import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.internal.util.typedef.internal.LT;
-
-/**
- * Implementation for {@link org.apache.ignite.cache.store.cassandra.session.CassandraSession}.
- */
-public class CassandraSessionImpl implements CassandraSession {
-    /** Number of CQL query execution attempts. */
-    private static final int CQL_EXECUTION_ATTEMPTS_COUNT = 20;
-
-    /** Min timeout between CQL query execution attempts. */
-    private static final int CQL_EXECUTION_ATTEMPT_MIN_TIMEOUT = 100;
-
-    /** Max timeout between CQL query execution attempts. */
-    private static final int CQL_EXECUTION_ATTEMPT_MAX_TIMEOUT = 500;
-
-    /** Timeout increment for CQL query execution attempts. */
-    private static final int CQL_ATTEMPTS_TIMEOUT_INCREMENT = 100;
-
-    /** Cassandra cluster builder. */
-    private volatile Cluster.Builder builder;
-
-    /**
-     * Current generation number of the Cassandra session. Each time the session is recreated, its generation
-     * is incremented. The main idea behind the session generation is to track prepared statements created with
-     * an old Cassandra session (which is no longer valid) and to avoid extra refreshes of the session by multiple threads.
-     **/
-    private volatile Long generation = 0L;
-
-    /** Wrapped Cassandra session. **/
-    private volatile WrappedSession wrapperSes;
-
-    /** Number of references to Cassandra driver session (for multithreaded environment). */
-    private volatile int refCnt;
-
-    /** Storage for the session prepared statements */
-    private static final Map<String, WrappedPreparedStatement> sesStatements = new HashMap<>();
-
-    /** Number of records to immediately fetch in CQL statement execution. */
-    private Integer fetchSize;
-
-    /** Consistency level for Cassandra READ operations (select). */
-    private ConsistencyLevel readConsistency;
-
-    /** Consistency level for Cassandra WRITE operations (insert/update/delete). */
-    private ConsistencyLevel writeConsistency;
-
-    /** Expiration timeout. */
-    private long expirationTimeout;
-
-    /** Logger. */
-    private IgniteLogger log;
-
-    /** Table absence error handlers counter. */
-    private final Map<String, AtomicInteger> tblAbsenceHandlersCnt = new ConcurrentHashMap<>();
-
-    /** Lock used to synchronize multiple threads trying to do session refresh. **/
-    private final ReentrantLock refreshLock = new ReentrantLock();
-
-    /**
-     * Creates instance of Cassandra driver session wrapper.
-     *
-     * @param builder Builder for Cassandra cluster.
-     * @param fetchSize Number of rows to immediately fetch in CQL statement execution.
-     * @param readConsistency Consistency level for Cassandra READ operations (select).
-     * @param writeConsistency Consistency level for Cassandra WRITE operations (insert/update/delete).
-     * @param expirationTimeout Expiration timeout.
-     * @param log Logger.
-     */
-    public CassandraSessionImpl(Cluster.Builder builder, Integer fetchSize, ConsistencyLevel readConsistency,
-        ConsistencyLevel writeConsistency, long expirationTimeout, IgniteLogger log) {
-        this.builder = builder;
-        this.fetchSize = fetchSize;
-        this.readConsistency = readConsistency;
-        this.writeConsistency = writeConsistency;
-        this.expirationTimeout = expirationTimeout;
-        this.log = log;
-    }
-
-    /** {@inheritDoc} */
-    @Override public <V> V execute(ExecutionAssistant<V> assistant) {
-        int attempt = 0;
-        Throwable error = null;
-        String errorMsg = "Failed to execute Cassandra CQL statement: " + assistant.getStatement();
-
-        RandomSleeper sleeper = newSleeper();
-
-        incrementSessionRefs();
-
-        try {
-            while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-                if (attempt != 0) {
-                    log.warning("Trying " + (attempt + 1) + " attempt to execute Cassandra CQL statement: " +
-                            assistant.getStatement());
-                }
-
-                WrappedPreparedStatement preparedSt = null;
-                WrappedSession ses = null;
-
-                try {
-                    preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(),
-                        assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
-
-                    if (preparedSt == null)
-                        return null;
-
-                    Statement statement = tuneStatementExecutionOptions(assistant.bindStatement(preparedSt));
-
-                    ses = session();
-
-                    ResultSet res = ses.execute(statement);
-
-                    Row row = res == null || !res.iterator().hasNext() ? null : res.iterator().next();
-
-                    return row == null ? null : assistant.process(row);
-                }
-                catch (Throwable e) {
-                    error = e;
-
-                    if (CassandraHelper.isTableAbsenceError(e)) {
-                        if (!assistant.tableExistenceRequired()) {
-                            log.warning(errorMsg, e);
-                            return null;
-                        }
-
-                        handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
-                    }
-                    else if (CassandraHelper.isHostsAvailabilityError(e))
-                        handleHostsAvailabilityError(ses == null ? -1 : ses.generation, e, attempt, errorMsg);
-                    else if (CassandraHelper.isPreparedStatementClusterError(e))
-                        handlePreparedStatementClusterError(preparedSt == null ? -1 : preparedSt.generation, e);
-                    else
-                        // For an error we don't know how to handle, stop further attempts and terminate.
-                        throw new IgniteException(errorMsg, e);
-                }
-
-                if (!CassandraHelper.isTableAbsenceError(error))
-                    sleeper.sleep();
-
-                attempt++;
-            }
-        }
-        catch (Throwable e) {
-            error = e;
-        }
-        finally {
-            decrementSessionRefs();
-        }
-
-        log.error(errorMsg, error);
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /** {@inheritDoc} */
-    @Override public <R, V> R execute(BatchExecutionAssistant<R, V> assistant, Iterable<? extends V> data) {
-        if (data == null || !data.iterator().hasNext())
-            return assistant.processedData();
-
-        int attempt = 0;
-        String errorMsg = "Failed to execute Cassandra " + assistant.operationName() + " operation";
-        Throwable error = new IgniteException(errorMsg);
-
-        RandomSleeper sleeper = newSleeper();
-
-        int dataSize = 0;
-
-        incrementSessionRefs();
-
-        try {
-            while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-                if (attempt != 0) {
-                    log.warning("Trying " + (attempt + 1) + " attempt to execute Cassandra batch " +
-                            assistant.operationName() + " operation to process rest " +
-                            (dataSize - assistant.processedCount()) + " of " + dataSize + " elements");
-                }
-
-                // Clean errors info before next communication with Cassandra.
-                Throwable unknownEx = null;
-                Throwable tblAbsenceEx = null;
-                Throwable hostsAvailEx = null;
-                Throwable prepStatEx = null;
-
-                List<Cache.Entry<Integer, ResultSetFuture>> futResults = new LinkedList<>();
-
-                WrappedPreparedStatement preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(),
-                    assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
-
-                if (preparedSt == null)
-                    return null;
-
-                WrappedSession ses = null;
-
-                int seqNum = 0;
-
-                for (V obj : data) {
-                    if (!assistant.alreadyProcessed(seqNum)) {
-                        try {
-                            ses = session();
-                            Statement statement = tuneStatementExecutionOptions(assistant.bindStatement(preparedSt, obj));
-                            ResultSetFuture fut = ses.executeAsync(statement);
-                            futResults.add(new CacheEntryImpl<>(seqNum, fut));
-                        }
-                        catch (Throwable e) {
-                            if (CassandraHelper.isTableAbsenceError(e)) {
-                                // If this is a table absence error and the table is not required for the operation, we can return.
-                                if (!assistant.tableExistenceRequired())
-                                    return assistant.processedData();
-
-                                tblAbsenceEx = e;
-                                handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
-                            }
-                            else if (CassandraHelper.isHostsAvailabilityError(e)) {
-                                // Handle host availability only once: the check must precede the assignment,
-                                // otherwise the handler would never be invoked.
-                                if (hostsAvailEx == null)
-                                    handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg);
-
-                                hostsAvailEx = e;
-                            }
-                            else if (CassandraHelper.isPreparedStatementClusterError(e)) {
-                                prepStatEx = e;
-
-                                handlePreparedStatementClusterError(preparedSt.generation, e);
-
-                                preparedSt = prepareStatement(assistant.getTable(), assistant.getStatement(),
-                                        assistant.getPersistenceSettings(), assistant.tableExistenceRequired());
-
-                                if (preparedSt == null)
-                                    return null;
-                            }
-                            else
-                                unknownEx = e;
-                        }
-                    }
-
-                    seqNum++;
-                }
-
-                dataSize = seqNum;
-
-                // For an error we don't know how to handle, stop further attempts and terminate.
-                if (unknownEx != null)
-                    throw new IgniteException(errorMsg, unknownEx);
-
-                // Remember any of the last errors.
-                if (tblAbsenceEx != null)
-                    error = tblAbsenceEx;
-                else if (hostsAvailEx != null)
-                    error = hostsAvailEx;
-                else if (prepStatEx != null)
-                    error = prepStatEx;
-
-                // Clean errors info before next communication with Cassandra.
-                unknownEx = null;
-                tblAbsenceEx = null;
-                hostsAvailEx = null;
-                prepStatEx = null;
-
-                for (Cache.Entry<Integer, ResultSetFuture> futureResult : futResults) {
-                    try {
-                        ResultSet resSet = futureResult.getValue().getUninterruptibly();
-                        Row row = resSet != null && resSet.iterator().hasNext() ? resSet.iterator().next() : null;
-
-                        assistant.process(row, futureResult.getKey());
-                    }
-                    catch (Throwable e) {
-                        if (CassandraHelper.isTableAbsenceError(e))
-                            tblAbsenceEx = e;
-                        else if (CassandraHelper.isHostsAvailabilityError(e))
-                            hostsAvailEx = e;
-                        else if (CassandraHelper.isPreparedStatementClusterError(e))
-                            prepStatEx = e;
-                        else
-                            unknownEx = e;
-                    }
-                }
-
-                // For an error we don't know how to handle, stop further attempts and terminate.
-                if (unknownEx != null)
-                    throw new IgniteException(errorMsg, unknownEx);
-
-                // If no errors occurred, the operation completed successfully and we can return.
-                if (tblAbsenceEx == null && hostsAvailEx == null && prepStatEx == null && assistant.processedCount() == dataSize)
-                    return assistant.processedData();
-
-                if (tblAbsenceEx != null) {
-                    // If this is a table absence error and the table is not required for the operation, we can return.
-                    if (!assistant.tableExistenceRequired())
-                        return assistant.processedData();
-
-                    error = tblAbsenceEx;
-                    handleTableAbsenceError(assistant.getTable(), assistant.getPersistenceSettings());
-                }
-
-                if (hostsAvailEx != null) {
-                    error = hostsAvailEx;
-                    handleHostsAvailabilityError(ses.generation, hostsAvailEx, attempt, errorMsg);
-                }
-
-                if (prepStatEx != null) {
-                    error = prepStatEx;
-                    handlePreparedStatementClusterError(preparedSt.generation, prepStatEx);
-                }
-
-                if (!CassandraHelper.isTableAbsenceError(error))
-                    sleeper.sleep();
-
-                attempt++;
-            }
-        }
-        catch (Throwable e) {
-            error = e;
-        }
-        finally {
-            decrementSessionRefs();
-        }
-
-        errorMsg = "Failed to process " + (dataSize - assistant.processedCount()) +
-            " of " + dataSize + " elements, during " + assistant.operationName() +
-            " operation with Cassandra";
-
-        LT.warn(log, error, errorMsg, false, false);
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void execute(BatchLoaderAssistant assistant) {
-        int attempt = 0;
-        String errorMsg = "Failed to execute Cassandra " + assistant.operationName() + " operation";
-        Throwable error = new IgniteException(errorMsg);
-
-        RandomSleeper sleeper = newSleeper();
-
-        incrementSessionRefs();
-
-        try {
-            while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-                if (attempt != 0)
-                    log.warning("Trying " + (attempt + 1) + " attempt to load Ignite cache");
-
-                Statement statement = tuneStatementExecutionOptions(assistant.getStatement());
-
-                WrappedSession ses = null;
-
-                try {
-                    ses = session();
-
-                    ResultSetFuture fut = ses.executeAsync(statement);
-                    ResultSet resSet = fut.getUninterruptibly();
-
-                    if (resSet == null || !resSet.iterator().hasNext())
-                        return;
-
-                    for (Row row : resSet)
-                        assistant.process(row);
-
-                    return;
-                }
-                catch (Throwable e) {
-                    error = e;
-
-                    if (CassandraHelper.isTableAbsenceError(e))
-                        return;
-                    else if (CassandraHelper.isHostsAvailabilityError(e))
-                        handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg);
-                    else
-                        // For an error we don't know how to handle, stop further attempts and terminate.
-                        throw new IgniteException(errorMsg, e);
-                }
-
-                sleeper.sleep();
-
-                attempt++;
-            }
-        }
-        catch (Throwable e) {
-            error = e;
-        }
-        finally {
-            decrementSessionRefs();
-        }
-
-        log.error(errorMsg, error);
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void execute(List<Mutation> mutations) {
-        if (mutations == null || mutations.isEmpty())
-            return;
-
-        Throwable error = null;
-        String errorMsg = "Failed to apply " + mutations.size() + " mutations performed withing Ignite " +
-                "transaction into Cassandra";
-
-        int attempt = 0;
-        boolean tableExistenceRequired = false;
-        Map<String, WrappedPreparedStatement> statements = new HashMap<>();
-        Map<String, KeyValuePersistenceSettings> tableSettings = new HashMap<>();
-        RandomSleeper sleeper = newSleeper();
-
-        incrementSessionRefs();
-
-        try {
-            while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-                if (attempt != 0) {
-                    log.warning("Trying " + (attempt + 1) + " attempt to apply " + mutations.size() + " mutations " +
-                            "performed withing Ignite transaction into Cassandra");
-                }
-
-                WrappedPreparedStatement prepStatement = null;
-                WrappedSession ses = null;
-
-                try {
-                    BatchStatement batch = new BatchStatement();
-
-                    // accumulating all the mutations into one Cassandra logged batch
-                    for (Mutation mutation : mutations) {
-                        String key = mutation.getTable() + mutation.getClass().getName();
-                        prepStatement = statements.get(key);
-
-                        if (prepStatement == null) {
-                            prepStatement = prepareStatement(mutation.getTable(), mutation.getStatement(),
-                                    mutation.getPersistenceSettings(), mutation.tableExistenceRequired());
-
-                            if (prepStatement != null)
-                                statements.put(key, prepStatement);
-                        }
-
-                        if (prepStatement != null)
-                            batch.add(mutation.bindStatement(prepStatement));
-
-                        if (attempt == 0) {
-                            if (mutation.tableExistenceRequired()) {
-                                tableExistenceRequired = true;
-
-                                if (!tableSettings.containsKey(mutation.getTable()))
-                                    tableSettings.put(mutation.getTable(), mutation.getPersistenceSettings());
-                            }
-                        }
-                    }
-
-                    // committing logged batch into Cassandra
-                    if (batch.size() > 0) {
-                        ses = session();
-                        ses.execute(tuneStatementExecutionOptions(batch));
-                    }
-
-                    return;
-                }
-                catch (Throwable e) {
-                    error = e;
-
-                    if (CassandraHelper.isTableAbsenceError(e)) {
-                        if (tableExistenceRequired) {
-                            for (Map.Entry<String, KeyValuePersistenceSettings> entry : tableSettings.entrySet())
-                                handleTableAbsenceError(entry.getKey(), entry.getValue());
-                        }
-                        else
-                            return;
-                    }
-                    else if (CassandraHelper.isHostsAvailabilityError(e)) {
-                        if (handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg))
-                            statements.clear();
-                    }
-                    else if (CassandraHelper.isPreparedStatementClusterError(e)) {
-                        handlePreparedStatementClusterError(prepStatement == null ? 0 : prepStatement.generation, e);
-                        statements.clear();
-                    }
-                    else {
-                        // For an error we don't know how to handle, stop further attempts and terminate.
-                        throw new IgniteException(errorMsg, e);
-                    }
-                }
-
-                if (!CassandraHelper.isTableAbsenceError(error))
-                    sleeper.sleep();
-
-                attempt++;
-            }
-        }
-        catch (Throwable e) {
-            error = e;
-        }
-        finally {
-            decrementSessionRefs();
-        }
-
-        log.error(errorMsg, error);
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /** {@inheritDoc} */
-    @Override public synchronized void close() throws IOException {
-        if (decrementSessionRefs() == 0 && wrapperSes != null) {
-            SessionPool.put(this, wrapperSes.ses, expirationTimeout);
-            wrapperSes = null;
-        }
-    }
-
-    /**
-     * Recreates Cassandra driver session.
-     */
-    private synchronized void refresh() {
-        // Make sure that the session is removed from the pool.
-        SessionPool.get(this);
-
-        // Close and reopen the session.
-        if (wrapperSes != null)
-            CassandraHelper.closeSession(wrapperSes.ses);
-
-        wrapperSes = null;
-
-        session();
-    }
-
-    /**
-     * Returns Cassandra session and its generation number.
-     *
-     * @return Wrapper object providing Cassandra session and its generation number.
-     */
-    private synchronized WrappedSession session() {
-        if (wrapperSes != null)
-            return wrapperSes;
-
-        Session ses = SessionPool.get(this);
-
-        if (ses != null) {
-            this.wrapperSes = new WrappedSession(ses, generation);
-            return this.wrapperSes;
-        }
-
-        synchronized (sesStatements) {
-            sesStatements.clear();
-        }
-
-        try {
-            ses = builder.build().connect();
-            generation++;
-            this.wrapperSes = new WrappedSession(ses, generation);
-        }
-        catch (Throwable e) {
-            throw new IgniteException("Failed to establish session with Cassandra database", e);
-        }
-
-        return this.wrapperSes;
-    }
-
-    /**
-     * Increments number of references to Cassandra driver session (required for multithreaded environment).
-     */
-    private synchronized void incrementSessionRefs() {
-        refCnt++;
-    }
-
-    /**
-     * Decrements number of references to Cassandra driver session (required for multithreaded environment).
-     */
-    private synchronized int decrementSessionRefs() {
-        if (refCnt != 0)
-            refCnt--;
-
-        return refCnt;
-    }
-
-    /**
-     * Prepares CQL statement using current Cassandra driver session.
-     *
-     * @param table Table name.
-     * @param statement CQL statement.
-     * @param settings Persistence settings.
-     * @param tblExistenceRequired Flag indicating if table existence is required for the statement.
-     * @return Prepared statement.
-     */
-    private WrappedPreparedStatement prepareStatement(String table, String statement, KeyValuePersistenceSettings settings,
-                                                      boolean tblExistenceRequired) {
-
-        int attempt = 0;
-        Throwable error = null;
-        String errorMsg = "Failed to prepare Cassandra CQL statement: " + statement;
-
-        RandomSleeper sleeper = newSleeper();
-
-        incrementSessionRefs();
-
-        try {
-            synchronized (sesStatements) {
-                WrappedPreparedStatement wrapper = sesStatements.get(statement);
-
-                if (wrapper != null) {
-                    // Prepared statement is still valid, because it was created with the current Cassandra session.
-                    if (generation == wrapper.generation)
-                        return wrapper;
-                    // Prepared statement is no longer valid, because it was created with a previous Cassandra session.
-                    else
-                        sesStatements.remove(statement);
-                }
-            }
-
-            while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-                WrappedSession ses = null;
-
-                try {
-                    ses = session();
-
-                    WrappedPreparedStatement prepStatement = ses.prepare(statement);
-
-                    synchronized (sesStatements) {
-                        sesStatements.put(statement, prepStatement);
-                    }
-
-                    return prepStatement;
-                }
-                catch (Throwable e) {
-                    if (CassandraHelper.isTableAbsenceError(e)) {
-                        if (!tblExistenceRequired)
-                            return null;
-
-                        handleTableAbsenceError(table, settings);
-                    }
-                    else if (CassandraHelper.isHostsAvailabilityError(e))
-                        handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg);
-                    else
-                        throw new IgniteException(errorMsg, e);
-
-                    error = e;
-                }
-
-                if (!CassandraHelper.isTableAbsenceError(error))
-                    sleeper.sleep();
-
-                attempt++;
-            }
-        }
-        finally {
-            decrementSessionRefs();
-        }
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /**
-     * Creates Cassandra keyspace.
-     *
-     * @param settings Persistence settings.
-     */
-    private void createKeyspace(KeyValuePersistenceSettings settings) {
-        int attempt = 0;
-        Throwable error = null;
-        String errorMsg = "Failed to create Cassandra keyspace '" + settings.getKeyspace() + "'";
-
-        while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-            WrappedSession ses = null;
-
-            try {
-                ses = session();
-
-                if (log.isInfoEnabled()) {
-                    log.info("-----------------------------------------------------------------------");
-                    log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'");
-                    log.info("-----------------------------------------------------------------------\n\n" +
-                        settings.getKeyspaceDDLStatement() + "\n");
-                    log.info("-----------------------------------------------------------------------");
-                }
-
-                ses.execute(settings.getKeyspaceDDLStatement());
-
-                if (log.isInfoEnabled())
-                    log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created");
-
-                return;
-            }
-            catch (AlreadyExistsException ignored) {
-                if (log.isInfoEnabled())
-                    log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist");
-
-                return;
-            }
-            catch (Throwable e) {
-                if (!CassandraHelper.isHostsAvailabilityError(e))
-                    throw new IgniteException(errorMsg, e);
-
-                handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg);
-
-                error = e;
-            }
-
-            attempt++;
-        }
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /**
-     * Creates Cassandra table.
-     *
-     * @param table Table name.
-     * @param settings Persistence settings.
-     */
-    private void createTable(String table, KeyValuePersistenceSettings settings) {
-        int attempt = 0;
-        Throwable error = null;
-        String tableFullName = settings.getKeyspace() + "." + table;
-        String errorMsg = "Failed to create Cassandra table '" + tableFullName + "'";
-
-        while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-            WrappedSession ses = null;
-
-            try {
-                ses = session();
-
-                if (log.isInfoEnabled()) {
-                    log.info("-----------------------------------------------------------------------");
-                    log.info("Creating Cassandra table '" + tableFullName + "'");
-                    log.info("-----------------------------------------------------------------------\n\n" +
-                        settings.getTableDDLStatement(table) + "\n");
-                    log.info("-----------------------------------------------------------------------");
-                }
-
-                ses.execute(settings.getTableDDLStatement(table));
-
-                if (log.isInfoEnabled())
-                    log.info("Cassandra table '" + tableFullName + "' was successfully created");
-
-                return;
-            }
-            catch (AlreadyExistsException ignored) {
-                if (log.isInfoEnabled())
-                    log.info("Cassandra table '" + tableFullName + "' already exist");
-
-                return;
-            }
-            catch (Throwable e) {
-                if (!CassandraHelper.isHostsAvailabilityError(e) && !CassandraHelper.isKeyspaceAbsenceError(e))
-                    throw new IgniteException(errorMsg, e);
-
-                if (CassandraHelper.isKeyspaceAbsenceError(e)) {
-                    log.warning("Failed to create Cassandra table '" + tableFullName +
-                        "' cause appropriate keyspace doesn't exist", e);
-                    createKeyspace(settings);
-                }
-                else if (CassandraHelper.isHostsAvailabilityError(e))
-                    handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg);
-
-                error = e;
-            }
-
-            attempt++;
-        }
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /**
-     * Creates Cassandra table indexes.
-     *
-     * @param table Table name.
-     * @param settings Persistence settings.
-     */
-    private void createTableIndexes(String table, KeyValuePersistenceSettings settings) {
-        List<String> indexDDLStatements = settings.getIndexDDLStatements(table);
-
-        if (indexDDLStatements == null || indexDDLStatements.isEmpty())
-            return;
-
-        int attempt = 0;
-        Throwable error = null;
-        String tableFullName = settings.getKeyspace() + "." + table;
-        String errorMsg = "Failed to create indexes for Cassandra table " + tableFullName;
-
-        while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) {
-            WrappedSession ses = null;
-
-            try {
-                ses = session();
-
-                if (log.isInfoEnabled()) {
-                    log.info("-----------------------------------------------------------------------");
-                    log.info("Creating indexes for Cassandra table '" + tableFullName + "'");
-                    log.info("-----------------------------------------------------------------------");
-                }
-
-                for (String statement : indexDDLStatements) {
-                    try {
-                        if (log.isInfoEnabled()) {
-                            log.info(statement);
-                            log.info("-----------------------------------------------------------------------");
-                        }
-
-                        ses.execute(statement);
-                    }
-                    catch (AlreadyExistsException ignored) {
-                    }
-                    catch (Throwable e) {
-                        if (!(e instanceof InvalidQueryException) || !"Index already exists".equals(e.getMessage()))
-                            throw new IgniteException(errorMsg, e);
-                    }
-                }
-
-                if (log.isInfoEnabled())
-                    log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created");
-
-                return;
-            }
-            catch (Throwable e) {
-                if (CassandraHelper.isHostsAvailabilityError(e))
-                    handleHostsAvailabilityError(ses == null ? 0 : ses.generation, e, attempt, errorMsg);
-                else if (CassandraHelper.isTableAbsenceError(e))
-                    createTable(table, settings);
-                else
-                    throw new IgniteException(errorMsg, e);
-
-                error = e;
-            }
-
-            attempt++;
-        }
-
-        throw new IgniteException(errorMsg, error);
-    }
-
-    /**
-     * Tunes CQL statement execution options (consistency level, fetch size, etc.).
-     *
-     * @param statement Statement.
-     * @return Modified statement.
-     */
-    private Statement tuneStatementExecutionOptions(Statement statement) {
-        String qry = "";
-
-        if (statement instanceof BoundStatement)
-            qry = ((BoundStatement)statement).preparedStatement().getQueryString().trim().toLowerCase();
-        else if (statement instanceof PreparedStatement)
-            qry = ((PreparedStatement)statement).getQueryString().trim().toLowerCase();
-
-        boolean readStatement = qry.startsWith("select");
-        boolean writeStatement = statement instanceof Batch || statement instanceof BatchStatement ||
-            qry.startsWith("insert") || qry.startsWith("delete") || qry.startsWith("update");
-
-        if (readStatement && readConsistency != null)
-            statement.setConsistencyLevel(readConsistency);
-
-        if (writeStatement && writeConsistency != null)
-            statement.setConsistencyLevel(writeConsistency);
-
-        if (fetchSize != null)
-            statement.setFetchSize(fetchSize);
-
-        return statement;
-    }
-
-    /**
-     * Handles the situation when a Cassandra table doesn't exist.
-     *
-     * @param table Table name.
-     * @param settings Persistence settings.
-     */
-    private void handleTableAbsenceError(String table, KeyValuePersistenceSettings settings) {
-        String tableFullName = settings.getKeyspace() + "." + table;
-
-        AtomicInteger counter = tblAbsenceHandlersCnt.computeIfAbsent(tableFullName, k -> new AtomicInteger(-1));
-
-        int hndNum = counter.incrementAndGet();
-
-        try {
-            synchronized (counter) {
-                // Oops... I am not the first thread that tried to handle the table absence problem.
-                if (hndNum != 0) {
-                    log.warning("Table " + tableFullName + " absence problem detected. " +
-                            "Another thread already fixed it.");
-                    return;
-                }
-
-                log.warning("Table " + tableFullName + " absence problem detected. " +
-                        "Trying to create table.");
-
-                createKeyspace(settings);
-                createTable(table, settings);
-                createTableIndexes(table, settings);
-            }
-        }
-        finally {
-            if (hndNum == 0)
-                counter.set(-1);
-        }
-    }
-
-    /**
-     * Handles the situation when prepared statement execution failed because the session to the cluster was released.
-     *
-     * @param sesGeneration Generation of Cassandra session used to create prepared statement.
-     * @param e Exception thrown during statement execution.
-     */
-    private void handlePreparedStatementClusterError(long sesGeneration, Throwable e) {
-        if (sesGeneration < generation) {
-            log.warning("Prepared statement cluster error detected, another thread already fixed the problem", e);
-            return;
-        }
-
-        refreshLock.lock();
-
-        try {
-            if (sesGeneration < generation) {
-                log.warning("Prepared statement cluster error detected, another thread already fixed the problem", e);
-                return;
-            }
-
-            log.warning("Prepared statement cluster error detected, refreshing Cassandra session", e);
-
-            refresh();
-
-            log.warning("Cassandra session refreshed");
-        }
-        finally {
-            refreshLock.unlock();
-        }
-    }
-
-    /**
-     * Handles the situation when the Cassandra host responsible for CQL query execution becomes unavailable.
-     *
-     * @param sesGeneration Generation of Cassandra session used to run CQL statement.
-     * @param e Exception to handle.
-     * @param attempt Current attempt number.
-     * @param msg Error message.
-     * @return {@code true} if host unavailability was successfully handled.
-     */
-    private boolean handleHostsAvailabilityError(long sesGeneration, Throwable e, int attempt, String msg) {
-        if (attempt >= CQL_EXECUTION_ATTEMPTS_COUNT) {
-            log.error("Host availability problem detected. " +
-                    "Number of CQL execution attempts reached maximum " + CQL_EXECUTION_ATTEMPTS_COUNT +
-                    ", exception will be thrown to upper execution layer.", e);
-            throw msg == null ? new IgniteException(e) : new IgniteException(msg, e);
-        }
-
-        if (attempt == CQL_EXECUTION_ATTEMPTS_COUNT / 4 ||
-            attempt == CQL_EXECUTION_ATTEMPTS_COUNT / 2 ||
-            attempt == CQL_EXECUTION_ATTEMPTS_COUNT / 2 + CQL_EXECUTION_ATTEMPTS_COUNT / 4 ||
-            attempt == CQL_EXECUTION_ATTEMPTS_COUNT - 1) {
-
-            refreshLock.lock();
-
-            try {
-                if (sesGeneration < generation)
-                    log.warning("Host availability problem detected, but already handled by another thread");
-                else {
-                    log.warning("Host availability problem detected, CQL execution attempt  " + (attempt + 1) + ", " +
-                            "refreshing Cassandra session", e);
-
-                    refresh();
-
-                    log.warning("Cassandra session refreshed");
-
-                    return true;
-                }
-            }
-            finally {
-                refreshLock.unlock();
-            }
-        }
-
-        log.warning("Host availability problem detected, CQL execution attempt " + (attempt + 1) + ", " +
-                "sleeping extra " + CQL_EXECUTION_ATTEMPT_MAX_TIMEOUT + " milliseconds", e);
-
-        try {
-            Thread.sleep(CQL_EXECUTION_ATTEMPT_MAX_TIMEOUT);
-        }
-        catch (InterruptedException ignored) {
-        }
-
-        log.warning("Sleep completed");
-
-        return false;
-    }
-
-    /**
-     * @return New random sleeper.
-     */
-    private RandomSleeper newSleeper() {
-        return new RandomSleeper(CQL_EXECUTION_ATTEMPT_MIN_TIMEOUT,
-                CQL_EXECUTION_ATTEMPT_MAX_TIMEOUT,
-                CQL_ATTEMPTS_TIMEOUT_INCREMENT, log);
-    }
-}
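
All of the execute(...) overloads above share the same retry skeleton: classify the error, run the matching handler, sleep, and rethrow once the attempts are exhausted. The snippet below is a stripped-down restatement of that pattern for readability only; the Action interface and the isRecoverable(...) placeholder are illustrative and not part of the module.

    // Stripped-down restatement of the retry skeleton used by the execute(...) methods above.
    final class RetrySkeleton {
        /** Unit of work to retry; stands in for the assistant-driven operations above. */
        interface Action<T> {
            T run() throws Exception;
        }

        static <T> T runWithRetries(Action<T> action, int maxAttempts, long sleepMs) throws Exception {
            Exception last = null;

            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                try {
                    return action.run();      // Happy path: return on the first successful attempt.
                }
                catch (Exception e) {
                    last = e;                 // Remember the last error for the final rethrow.

                    if (!isRecoverable(e))
                        throw e;              // Errors we don't know how to handle terminate immediately.

                    Thread.sleep(sleepMs);    // Simplified stand-in for RandomSleeper's growing random pause.
                }
            }

            throw last;                       // All attempts exhausted.
        }

        /** Placeholder for the CassandraHelper table-absence / host-availability / prepared-statement checks. */
        private static boolean isRecoverable(Exception e) {
            return true;
        }
    }
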
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
deleted file mode 100644
index b0dba8bf79f0a..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/ExecutionAssistant.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.Row;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-
-/**
- * Provides information for single operations (load, delete, write) of Ignite cache
- * backed by {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}.
- *
- * @param <R> type of the result returned from operation.
- */
-public interface ExecutionAssistant<R> {
-    /**
-     * Indicates if Cassandra table existence is required for an operation.
-     *
-     * @return {@code true} if table existence is required.
-     */
-    public boolean tableExistenceRequired();
-
-    /**
-     * Cassandra table to use for an operation.
-     *
-     * @return Table name.
-     */
-    public String getTable();
-
-    /**
-     * Returns CQL statement to be used for an operation.
-     *
-     * @return CQL statement.
-     */
-    public String getStatement();
-
-    /**
-     * Binds prepared statement.
-     *
-     * @param statement prepared statement.
-     *
-     * @return bound statement.
-     */
-    public BoundStatement bindStatement(PreparedStatement statement);
-
-    /**
-     * Persistence settings to use for an operation.
-     *
-     * @return persistence settings.
-     */
-    public KeyValuePersistenceSettings getPersistenceSettings();
-
-    /**
-     * Returns operation name.
-     *
-     * @return operation name.
-     */
-    public String operationName();
-
-    /**
-     * Processes Cassandra database table row returned by specified CQL statement.
-     *
-     * @param row Cassandra database table row.
-     *
-     * @return result of the operation.
-     */
-    public R process(Row row);
-}
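
A hedged sketch of a single-row read assistant built on this contract follows; the keyspace/table, the id/name columns and the "READ" operation name are assumptions made for the example, not something defined by the module.

    package org.apache.ignite.cache.store.cassandra.session;

    import com.datastax.driver.core.BoundStatement;
    import com.datastax.driver.core.PreparedStatement;
    import com.datastax.driver.core.Row;
    import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;

    /** Hypothetical assistant loading the "name" column of a single row by id. */
    class LoadByIdAssistant implements ExecutionAssistant<String> {
        /** Identifier to look up. */
        private final long id;

        /** Persistence settings describing the assumed keyspace/table. */
        private final KeyValuePersistenceSettings settings;

        LoadByIdAssistant(long id, KeyValuePersistenceSettings settings) {
            this.id = id;
            this.settings = settings;
        }

        @Override public boolean tableExistenceRequired() { return false; }

        @Override public String getTable() { return "my_table"; }

        @Override public String getStatement() { return "select name from test.my_table where id = ?"; }

        @Override public BoundStatement bindStatement(PreparedStatement statement) { return statement.bind(id); }

        @Override public KeyValuePersistenceSettings getPersistenceSettings() { return settings; }

        @Override public String operationName() { return "READ"; }

        @Override public String process(Row row) { return row == null ? null : row.getString("name"); }
    }
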
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java
deleted file mode 100644
index 1133caf3a5726..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/GenericBatchExecutionAssistant.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import java.util.HashSet;
-import java.util.Set;
-import com.datastax.driver.core.Row;
-
-/**
- * Implementation of the {@link org.apache.ignite.cache.store.cassandra.session.BatchExecutionAssistant}.
- *
- * @param <R> Type of the result returned from batch operation
- * @param <V> Type of the value used in batch operation
- */
-public abstract class GenericBatchExecutionAssistant<R, V> implements BatchExecutionAssistant<R, V> {
-    /** Identifiers of already processed objects. */
-    private Set<Integer> processed = new HashSet<>();
-
-    /** {@inheritDoc} */
-    @Override public void process(Row row, int seqNum) {
-        if (processed.contains(seqNum))
-            return;
-
-        process(row);
-
-        processed.add(seqNum);
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean alreadyProcessed(int seqNum) {
-        return processed.contains(seqNum);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int processedCount() {
-        return processed.size();
-    }
-
-    /** {@inheritDoc} */
-    @Override public R processedData() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean tableExistenceRequired() {
-        return false;
-    }
-
-    /**
-     * Processes particular row inside batch operation.
-     *
-     * @param row Row to process.
-     */
-    protected void process(Row row) {
-    }
-}
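
Because the bookkeeping lives in this base class, subclasses only have to supply the statement-related pieces. A hedged sketch of a delete-style assistant follows; the keyspace/table, the key type and the "DELETE" operation name are assumptions, not the module's own classes.

    package org.apache.ignite.cache.store.cassandra.session;

    import com.datastax.driver.core.BoundStatement;
    import com.datastax.driver.core.PreparedStatement;
    import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;

    /** Hypothetical delete-style assistant; row bookkeeping is inherited from the generic base class. */
    class DeleteByIdAssistant extends GenericBatchExecutionAssistant<Void, Long> {
        /** Persistence settings describing the assumed keyspace/table. */
        private final KeyValuePersistenceSettings settings;

        DeleteByIdAssistant(KeyValuePersistenceSettings settings) {
            this.settings = settings;
        }

        @Override public String getTable() { return "my_table"; }

        @Override public String getStatement() { return "delete from test.my_table where id = ?"; }

        @Override public BoundStatement bindStatement(PreparedStatement statement, Long id) {
            return statement.bind(id);
        }

        @Override public KeyValuePersistenceSettings getPersistenceSettings() { return settings; }

        @Override public String operationName() { return "DELETE"; }
    }
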
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java
deleted file mode 100644
index ab0795bf48108..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/LoadCacheCustomQueryWorker.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import java.util.concurrent.Callable;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.SimpleStatement;
-import com.datastax.driver.core.Statement;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
-import org.apache.ignite.lang.IgniteBiInClosure;
-
-/**
- * Worker for load cache using custom user query.
- *
- * @param <K> Key type.
- * @param <V> Value type.
- */
-public class LoadCacheCustomQueryWorker<K, V> implements Callable<Void> {
-    /** Cassandra session to execute CQL query */
-    private final CassandraSession ses;
-
-    /** Statement. */
-    private final Statement stmt;
-
-    /** Persistence controller */
-    private final PersistenceController ctrl;
-
-    /** Logger */
-    private final IgniteLogger log;
-
-    /** Closure for loaded values. */
-    private final IgniteBiInClosure<K, V> clo;
-
-    /**
-     * @param ses Session.
-     * @param qry Query.
-     * @param ctrl Persistence controller.
-     * @param log Logger.
-     * @param clo Closure for loaded values.
-     */
-    public LoadCacheCustomQueryWorker(CassandraSession ses, String qry, PersistenceController ctrl,
-                                      IgniteLogger log, IgniteBiInClosure<K, V> clo) {
-        this(ses, new SimpleStatement(qry.trim().endsWith(";") ? qry : qry + ';'), ctrl, log, clo);
-    }
-
-    /**
-     * @param ses Session.
-     * @param stmt Statement.
-     * @param ctrl Persistence controller.
-     * @param log Logger.
-     * @param clo Closure for loaded values.
-     */
-    public LoadCacheCustomQueryWorker(CassandraSession ses, Statement stmt, PersistenceController ctrl,
-                                      IgniteLogger log, IgniteBiInClosure<K, V> clo) {
-        this.ses = ses;
-        this.stmt = stmt;
-        this.ctrl = ctrl;
-        this.log = log;
-        this.clo = clo;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Void call() throws Exception {
-        ses.execute(new BatchLoaderAssistant() {
-            /** {@inheritDoc} */
-            @Override public String operationName() {
-                return "loadCache";
-            }
-
-            /** {@inheritDoc} */
-            @Override public Statement getStatement() {
-                return stmt;
-            }
-
-            /** {@inheritDoc} */
-            @Override public void process(Row row) {
-                K key;
-                V val;
-
-                try {
-                    key = (K)ctrl.buildKeyObject(row);
-                }
-                catch (Throwable e) {
-                    log.error("Failed to build Ignite key object from provided Cassandra row", e);
-
-                    throw new IgniteException("Failed to build Ignite key object from provided Cassandra row", e);
-                }
-
-                try {
-                    val = (V)ctrl.buildValueObject(row);
-                }
-                catch (Throwable e) {
-                    log.error("Failed to build Ignite value object from provided Cassandra row", e);
-
-                    throw new IgniteException("Failed to build Ignite value object from provided Cassandra row", e);
-                }
-
-                clo.apply(key, val);
-            }
-        });
-
-        return null;
-    }
-}
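
A hedged usage sketch of the worker: the executor, the query string and the target keyspace/table are placeholders, and the session, persistence controller, logger and closure are assumed to be provided by the surrounding cache store.

    package org.apache.ignite.cache.store.cassandra.session;

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import org.apache.ignite.IgniteLogger;
    import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
    import org.apache.ignite.lang.IgniteBiInClosure;

    /** Hedged usage sketch; all collaborators are assumed to come from the surrounding cache store. */
    class LoadCacheSketch {
        void load(CassandraSession ses, PersistenceController ctrl, IgniteLogger log,
            IgniteBiInClosure<Object, Object> clo) throws Exception {
            ExecutorService pool = Executors.newFixedThreadPool(4);

            try {
                // The worker wraps the query into a BatchLoaderAssistant and streams every row into 'clo'.
                Future<Void> fut = pool.submit(
                    new LoadCacheCustomQueryWorker<>(ses, "select * from test.my_table", ctrl, log, clo));

                // Propagates failures from key/value reconstruction as an ExecutionException.
                fut.get();
            }
            finally {
                pool.shutdown();
            }
        }
    }
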
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedPreparedStatement.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedPreparedStatement.java
deleted file mode 100644
index 46d5306311066..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedPreparedStatement.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import java.nio.ByteBuffer;
-import java.util.Map;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.CodecRegistry;
-import com.datastax.driver.core.ColumnDefinitions;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.PreparedId;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.policies.RetryPolicy;
-
-/**
- * Simple wrapper providing access to a Cassandra prepared statement and to the generation of the Cassandra
- * session which was used to create this statement.
- */
-public class WrappedPreparedStatement implements PreparedStatement {
-    /** Prepared statement. **/
-    private final PreparedStatement st;
-
-    /** Generation of Cassandra session which was used to prepare this statement. **/
-    final long generation;
-
-    /**
-     * Constructor.
-     *
-     * @param st Prepared statement.
-     * @param generation Generation of Cassandra session used to prepare this statement.
-     */
-    WrappedPreparedStatement(PreparedStatement st, long generation) {
-        this.st = st;
-        this.generation = generation;
-    }
-
-    /**
-     * Getter for wrapped statement.
-     *
-     * @return Wrapped original statement.
-     */
-    public PreparedStatement getWrappedStatement() {
-        return st;
-    }
-
-    /** {@inheritDoc} */
-    @Override public ColumnDefinitions getVariables() {
-        return st.getVariables();
-    }
-
-    /** {@inheritDoc} */
-    @Override public BoundStatement bind(Object... values) {
-        return st.bind(values);
-    }
-
-    /** {@inheritDoc} */
-    @Override public BoundStatement bind() {
-        return st.bind();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setRoutingKey(ByteBuffer routingKey) {
-        return st.setRoutingKey(routingKey);
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents) {
-        return st.setRoutingKey(routingKeyComponents);
-    }
-
-    /** {@inheritDoc} */
-    @Override public ByteBuffer getRoutingKey() {
-        return st.getRoutingKey();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setConsistencyLevel(ConsistencyLevel consistency) {
-        return st.setConsistencyLevel(consistency);
-    }
-
-    /** {@inheritDoc} */
-    @Override public ConsistencyLevel getConsistencyLevel() {
-        return st.getConsistencyLevel();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setSerialConsistencyLevel(ConsistencyLevel serialConsistency) {
-        return st.setSerialConsistencyLevel(serialConsistency);
-    }
-
-    /** {@inheritDoc} */
-    @Override public ConsistencyLevel getSerialConsistencyLevel() {
-        return st.getSerialConsistencyLevel();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getQueryString() {
-        return st.getQueryString();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getQueryKeyspace() {
-        return st.getQueryKeyspace();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement enableTracing() {
-        return st.enableTracing();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement disableTracing() {
-        return st.disableTracing();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isTracing() {
-        return st.isTracing();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setRetryPolicy(RetryPolicy policy) {
-        return st.setRetryPolicy(policy);
-    }
-
-    /** {@inheritDoc} */
-    @Override public RetryPolicy getRetryPolicy() {
-        return st.getRetryPolicy();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedId getPreparedId() {
-        return st.getPreparedId();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Map<String, ByteBuffer> getIncomingPayload() {
-        return st.getIncomingPayload();
-    }
-
-    /** {@inheritDoc} */
-    @Override public Map<String, ByteBuffer> getOutgoingPayload() {
-        return st.getOutgoingPayload();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setOutgoingPayload(Map<String, ByteBuffer> payload) {
-        return st.setOutgoingPayload(payload);
-    }
-
-    /** {@inheritDoc} */
-    @Override public CodecRegistry getCodecRegistry() {
-        return st.getCodecRegistry();
-    }
-
-    /** {@inheritDoc} */
-    @Override public PreparedStatement setIdempotent(Boolean idempotent) {
-        return st.setIdempotent(idempotent);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Boolean isIdempotent() {
-        return st.isIdempotent();
-    }
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedSession.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedSession.java
deleted file mode 100644
index d9b722435aaf6..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/WrappedSession.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
-
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.ResultSetFuture;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.exceptions.NoHostAvailableException;
-
-/**
- * Simple container for Cassandra session and its generation number.
- */
-public class WrappedSession {
-    /** Cassandra driver session. **/
-    final Session ses;
-
-    /** Cassandra session generation number. **/
-    final long generation;
-
-    /**
-     * Constructor.
-     *
-     * @param ses Cassandra session.
-     * @param generation Cassandra session generation number.
-     */
-    WrappedSession(Session ses, long generation) {
-        this.ses = ses;
-        this.generation = generation;
-    }
-
-    /**
-     * Prepares the provided query string.
-     *
-     * @param query the CQL query string to prepare
-     * @return the prepared statement corresponding to {@code query}.
-     * @throws NoHostAvailableException if no host in the cluster can be
-     *                                  contacted successfully to prepare this query.
-     */
-    WrappedPreparedStatement prepare(String query) {
-        return new WrappedPreparedStatement(ses.prepare(query), generation);
-    }
-
-    /**
-     * Executes the provided query.
-     *
-     * @param statement The CQL query to execute (that can be any {@link Statement}).
-     *
-     * @return The result of the query. That result will never be null but can be empty.
-     */
-    ResultSet execute(Statement statement) {
-        return ses.execute(statement);
-    }
-
-    /**
-     * Executes the provided query.
-     *
-     * @param query The CQL query string to execute.
-     *
-     * @return The result of the query. That result will never be null but can be empty.
-     */
-    ResultSet execute(String query) {
-        return ses.execute(query);
-    }
-
-    /**
-     * Executes the provided query asynchronously.
-     *
-     * @param statement the CQL query to execute (that can be any {@code Statement}).
-     *
-     * @return a future on the result of the query.
-     */
-    ResultSetFuture executeAsync(Statement statement) {
-        return ses.executeAsync(statement);
-    }
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java
deleted file mode 100644
index 9c8b917babce7..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains classes responsible for handling sessions and communication with Cassandra.
- */
-
-package org.apache.ignite.cache.store.cassandra.session;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/IdleSession.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/IdleSession.java
deleted file mode 100644
index 0faf4d3a9981f..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/IdleSession.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.pool;
-
-import com.datastax.driver.core.Session;
-import org.apache.ignite.cache.store.cassandra.common.CassandraHelper;
-
-/**
- * Simple wrapper for an idle Cassandra session returned to the pool, responsible for tracking session expiration and closing the expired session.
- */
-public class IdleSession {
-    /** Cassandra driver session. */
-    private Session ses;
-
-    /** Expiration timeout. */
-    private long expirationTimeout;
-
-    /** Wrapper creation time.  */
-    private long time;
-
-    /**
-     * Creates instance of Cassandra driver session wrapper.
-     *
-     * @param ses Cassandra driver session.
-     * @param expirationTimeout Session expiration timeout.
-     */
-    public IdleSession(Session ses, long expirationTimeout) {
-        this.ses = ses;
-        this.expirationTimeout = expirationTimeout;
-        this.time = System.currentTimeMillis();
-    }
-
-    /**
-     * Checks if Cassandra driver session expired.
-     *
-     * @return true if session expired.
-     */
-    public boolean expired() {
-        return expirationTimeout > 0 && System.currentTimeMillis() - time > expirationTimeout;
-    }
-
-    /**
-     * Returns wrapped Cassandra driver session.
-     *
-     * @return Cassandra driver session.
-     */
-    public Session driverSession() {
-        return ses;
-    }
-
-    /**
-     * Closes the wrapped Cassandra driver session.
-     */
-    public void release() {
-        CassandraHelper.closeSession(ses);
-        ses = null;
-    }
-}
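A small sketch of the intended lifecycle, assuming the driver session comes from a caller returning it to the pool (illustration only, not code from the module):

    import com.datastax.driver.core.Session;
    import org.apache.ignite.cache.store.cassandra.session.pool.IdleSession;

    /** Illustration only: wrap a returned driver session and close it once it has been idle too long. */
    public class IdleSessionSketch {
        static Session reuseOrClose(Session driverSes) {
            IdleSession idle = new IdleSession(driverSes, 30_000L); // 30 second idle timeout

            if (idle.expired()) {
                idle.release(); // closes the underlying driver session

                return null;
            }

            return idle.driverSession();
        }
    }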
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
deleted file mode 100644
index 3fd48017036ba..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/SessionPool.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.pool;
-
-import java.lang.Thread.State;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import com.datastax.driver.core.Session;
-import org.apache.ignite.cache.store.cassandra.session.CassandraSessionImpl;
-
-/**
- * Cassandra driver sessions pool.
- */
-public class SessionPool {
-    /**
-     * Monitors the session pool and closes expired sessions.
-     */
-    private static class SessionMonitor extends Thread {
-        /** {@inheritDoc} */
-        @Override public void run() {
-            try {
-                while (true) {
-                    try {
-                        Thread.sleep(SLEEP_TIMEOUT);
-                    }
-                    catch (InterruptedException ignored) {
-                        return;
-                    }
-
-                    List<Map.Entry<CassandraSessionImpl, IdleSession>> expiredSessions = new LinkedList<>();
-
-                    int sessionsCnt;
-
-                    synchronized (sessions) {
-                        sessionsCnt = sessions.size();
-
-                        for (Map.Entry<CassandraSessionImpl, IdleSession> entry : sessions.entrySet()) {
-                            if (entry.getValue().expired())
-                                expiredSessions.add(entry);
-                        }
-
-                        for (Map.Entry<CassandraSessionImpl, IdleSession> entry : expiredSessions)
-                            sessions.remove(entry.getKey());
-                    }
-
-                    for (Map.Entry<CassandraSessionImpl, IdleSession> entry : expiredSessions)
-                        entry.getValue().release();
-
-                    // All sessions in the pool have expired, so no additional thread is needed to manage the pool.
-                    if (sessionsCnt == expiredSessions.size())
-                        return;
-                }
-            }
-            finally {
-                release();
-            }
-        }
-    }
-
-    /** Sessions monitor sleep timeout. */
-    private static final long SLEEP_TIMEOUT = 60000; // 1 minute.
-
-    /** Sessions which were returned to pool. */
-    private static final Map<CassandraSessionImpl, IdleSession> sessions = new HashMap<>();
-
-    /** Singleton instance. */
-    private static SessionMonitor monitorSingleton;
-
-    static {
-        Runtime.getRuntime().addShutdownHook(new Thread() {
-            @Override public void run() {
-                release();
-            }
-        });
-    }
-
-    /**
-     * Returns Cassandra driver session to sessions pool.
-     *
-     * @param cassandraSes Session wrapper.
-     * @param driverSes Driver session.
-     * @param expirationTimeout Expiration timeout.
-     */
-    public static void put(CassandraSessionImpl cassandraSes, Session driverSes, long expirationTimeout) {
-        if (cassandraSes == null || driverSes == null)
-            return;
-
-        IdleSession old;
-
-        synchronized (sessions) {
-            old = sessions.put(cassandraSes, new IdleSession(driverSes, expirationTimeout));
-
-            if (monitorSingleton == null || State.TERMINATED.equals(monitorSingleton.getState())) {
-                monitorSingleton = new SessionMonitor();
-                monitorSingleton.setDaemon(true);
-                monitorSingleton.setName("Cassandra-sessions-pool");
-                monitorSingleton.start();
-            }
-        }
-
-        if (old != null)
-            old.release();
-    }
-
-    /**
-     * Extracts Cassandra driver session from pool.
-     *
-     * @param cassandraSes Session wrapper.
-     * @return Cassandra driver session.
-     */
-    public static Session get(CassandraSessionImpl cassandraSes) {
-        if (cassandraSes == null)
-            return null;
-
-        IdleSession wrapper;
-
-        synchronized (sessions) {
-            wrapper = sessions.remove(cassandraSes);
-        }
-
-        return wrapper == null ? null : wrapper.driverSession();
-    }
-
-    /**
-     * Releases all sessions from the pool and closes their connections to the Cassandra database.
-     */
-    public static void release() {
-        Collection<IdleSession> wrappers;
-
-        synchronized (sessions) {
-            try {
-                if (sessions.isEmpty())
-                    return;
-
-                wrappers = new LinkedList<>();
-
-                for (IdleSession wrapper : sessions.values())
-                    wrappers.add(wrapper);
-
-                sessions.clear();
-            }
-            finally {
-                if (!(Thread.currentThread() instanceof SessionMonitor) && monitorSingleton != null) {
-                    try {
-                        monitorSingleton.interrupt();
-                    }
-                    catch (Throwable ignored) {
-                    }
-                }
-            }
-        }
-
-        for (IdleSession wrapper : wrappers)
-            wrapper.release();
-    }
-}
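A minimal sketch of the pool's put/get round trip, assuming the CassandraSessionImpl wrapper and the driver session are already available (illustration only):

    import com.datastax.driver.core.Session;
    import org.apache.ignite.cache.store.cassandra.session.CassandraSessionImpl;
    import org.apache.ignite.cache.store.cassandra.session.pool.SessionPool;

    /** Illustration only: park an idle driver session in the pool and reclaim it later. */
    public class SessionPoolSketch {
        static Session roundTrip(CassandraSessionImpl cassandraSes, Session driverSes) {
            // Park the driver session for up to one minute; expired sessions are closed by the monitor thread.
            SessionPool.put(cassandraSes, driverSes, 60_000L);

            // Returns the parked session, or null if it has already expired and was closed.
            return SessionPool.get(cassandraSes);
        }
    }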
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java
deleted file mode 100644
index 4460793551f9f..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/pool/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains the session pool implementation for Cassandra sessions.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.pool;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java
deleted file mode 100644
index 2625e87619ea2..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/BaseMutation.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.transaction;
-
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
-
-/**
- * Base class to inherit from when implementing specific mutation operations.
- */
-public abstract class BaseMutation implements Mutation {
-    /** Cassandra table to use. */
-    private final String table;
-
-    /** Persistence controller to be utilized for mutation. */
-    private final PersistenceController ctrl;
-
-    /**
-     * Creates instance of mutation operation.
-     *
-     * @param table Cassandra table which should be used for the mutation.
-     * @param ctrl Persistence controller to use.
-     */
-    public BaseMutation(String table, PersistenceController ctrl) {
-        if (table == null || table.trim().isEmpty())
-            throw new IllegalArgumentException("Table name should be specified");
-
-        if (ctrl == null)
-            throw new IllegalArgumentException("Persistence controller should be specified");
-
-        this.table = table;
-        this.ctrl = ctrl;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getTable() {
-        return table;
-    }
-
-    /** {@inheritDoc} */
-    @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-        return ctrl.getPersistenceSettings();
-    }
-
-    /**
-     * Service method to get the persistence controller instance.
-     *
-     * @return Persistence controller to use for the mutation.
-     */
-    protected PersistenceController controller() {
-        return ctrl;
-    }
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java
deleted file mode 100644
index 79c0bfe08186e..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/DeleteMutation.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.transaction;
-
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
-
-/**
- * Mutation which deletes object from Cassandra.
- */
-public class DeleteMutation extends BaseMutation {
-    /** Ignite cache key of the object which should be deleted. */
-    private final Object key;
-
-    /**
-     * Creates instance of delete mutation operation.
-     *
-     * @param key Ignite cache key of the object which should be deleted.
-     * @param table Cassandra table which should be used for the mutation.
-     * @param ctrl Persistence controller to use.
-     */
-    public DeleteMutation(Object key, String table, PersistenceController ctrl) {
-        super(table, ctrl);
-        this.key = key;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean tableExistenceRequired() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getStatement() {
-        return controller().getDeleteStatement(getTable());
-    }
-
-    /** {@inheritDoc} */
-    @Override public BoundStatement bindStatement(PreparedStatement statement) {
-        return controller().bindKey(statement, key);
-    }
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java
deleted file mode 100644
index f3fb35480c8a1..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/Mutation.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.transaction;
-
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-
-/**
- * Provides information about a particular mutation operation performed within a transaction.
- */
-public interface Mutation {
-    /**
-     * Cassandra table to use for an operation.
-     *
-     * @return Table name.
-     */
-    public String getTable();
-
-    /**
-     * Indicates whether Cassandra table existence is required for this operation.
-     *
-     * @return {@code true} if table existence is required.
-     */
-    public boolean tableExistenceRequired();
-
-    /**
-     * Returns Ignite cache key/value persistence settings.
-     *
-     * @return Persistence settings.
-     */
-    public KeyValuePersistenceSettings getPersistenceSettings();
-
-    /**
-     * Returns the unbound CQL statement to be executed.
-     *
-     * @return Unbound CQL statement.
-     */
-    public String getStatement();
-
-    /**
-     * Binds the mutation parameters to the provided prepared statement.
-     *
-     * @param statement Prepared statement to bind.
-     * @return Bound statement.
-     */
-    public BoundStatement bindStatement(PreparedStatement statement);
-}
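A sketch of how a mutation is typically consumed, simplified from the flow in CassandraSessionImpl (assumed flow, not the module's actual code; the driver session is a placeholder):

    import com.datastax.driver.core.BoundStatement;
    import com.datastax.driver.core.PreparedStatement;
    import com.datastax.driver.core.Session;
    import org.apache.ignite.cache.store.cassandra.session.transaction.Mutation;

    /** Illustration only: apply a single mutation through a raw driver session. */
    public class MutationSketch {
        static void apply(Session driverSes, Mutation mutation) {
            // Unbound CQL produced by the persistence controller for the mutation's table.
            PreparedStatement prepared = driverSes.prepare(mutation.getStatement());

            // Bind the key (and, for writes, the value) of the affected cache entry.
            BoundStatement bound = mutation.bindStatement(prepared);

            driverSes.execute(bound);
        }
    }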
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java
deleted file mode 100644
index 22ecf2a78b53d..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/WriteMutation.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.transaction;
-
-import javax.cache.Cache;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.PreparedStatement;
-import org.apache.ignite.cache.store.cassandra.persistence.PersistenceController;
-
-/**
- * Mutation which writes (inserts) an object into Cassandra.
- */
-public class WriteMutation extends BaseMutation {
-    /** Ignite cache entry to be inserted into Cassandra. */
-    private final Cache.Entry entry;
-
-    /**
-     * Creates instance of write mutation operation.
-     *
-     * @param entry Ignite cache entry to be inserted into Cassandra.
-     * @param table Cassandra table which should be used for the mutation.
-     * @param ctrl Persistence controller to use.
-     */
-    public WriteMutation(Cache.Entry entry, String table, PersistenceController ctrl) {
-        super(table, ctrl);
-        this.entry = entry;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean tableExistenceRequired() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getStatement() {
-        return controller().getWriteStatement(getTable());
-    }
-
-    /** {@inheritDoc} */
-    @Override public BoundStatement bindStatement(PreparedStatement statement) {
-        return controller().bindKeyValue(statement, entry.getKey(), entry.getValue());
-    }
-}
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java
deleted file mode 100644
index e4d437716768a..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/transaction/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains mutation implementations used to store changes made inside an Ignite transaction.
- */
-
-package org.apache.ignite.cache.store.cassandra.session.transaction;
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
deleted file mode 100644
index 569c65de5eb71..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/DDLGenerator.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.cache.store.cassandra.utils;
-
-import java.io.File;
-import java.util.List;
-
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-
-/**
- * Generates Cassandra DDL statements from persistence descriptor xml file.
- */
-public class DDLGenerator {
-    /**
-     * DDLGenerator entry point.
-     *
-     * @param args Arguments for DDLGenerator.
-     */
-    public static void main(String[] args) {
-        if (args == null || args.length == 0)
-            return;
-
-        boolean success = true;
-
-        for (String arg : args) {
-            File file = new File(arg);
-            if (!file.isFile()) {
-                success = false;
-                System.out.println("-------------------------------------------------------------");
-                System.out.println("Incorrect file specified: " + arg);
-                System.out.println("-------------------------------------------------------------");
-                continue;
-            }
-
-            try {
-                KeyValuePersistenceSettings settings = new KeyValuePersistenceSettings(file);
-                String table = settings.getTable() != null ? settings.getTable() : "my_table";
-
-                System.out.println("-------------------------------------------------------------");
-                System.out.println("DDL for keyspace/table from file: " + arg);
-                System.out.println("-------------------------------------------------------------");
-                System.out.println();
-                System.out.println(settings.getKeyspaceDDLStatement());
-                System.out.println();
-                System.out.println(settings.getTableDDLStatement(table));
-                System.out.println();
-
-                List<String> statements = settings.getIndexDDLStatements(table);
-                if (statements != null && !statements.isEmpty()) {
-                    for (String st : statements) {
-                        System.out.println(st);
-                        System.out.println();
-                    }
-                }
-            }
-            catch (Throwable e) {
-                success = false;
-                System.out.println("-------------------------------------------------------------");
-                System.out.println("Invalid file specified: " + arg);
-                System.out.println("-------------------------------------------------------------");
-                e.printStackTrace();
-            }
-        }
-
-        if (!success)
-            throw new RuntimeException("Failed to process some of the specified files");
-    }
-}
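The generator is normally invoked from the command line with one or more persistence descriptor paths; below is a minimal programmatic sketch with a placeholder descriptor path:

    import org.apache.ignite.cache.store.cassandra.utils.DDLGenerator;

    /** Illustration only: print keyspace/table/index DDL for one persistence descriptor. */
    public class DDLGeneratorSketch {
        public static void main(String[] args) {
            DDLGenerator.main(new String[] {"/opt/dev/ignite/persistence-settings-1.xml"});
        }
    }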
diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java
deleted file mode 100644
index 3a2cd108f782c..0000000000000
--- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/utils/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains utility classes.
- */
-
-package org.apache.ignite.cache.store.cassandra.utils;
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/README.txt b/modules/cassandra/store/src/test/bootstrap/aws/README.txt
deleted file mode 100644
index a61b235132fd2..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/README.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-Shell scripts to spin up Ignite, Cassandra and Load Tests clusters in AWS.
-
-1) cassandra - bootstrap scripts for Cassandra cluster nodes
-2) ganglia - bootstrap scripts for Ganglia master and agents
-3) ignite - bootstrap scripts for Ignite cluster nodes
-4) tests - bootstrap scripts for Load Tests cluster nodes
-5) common.sh - definitions for common functions
-6) env.sh - definitions for common variables
-7) logs-collector.sh - logs collector daemon script that collects logs and uploads them to S3
-
-For more details please look at the documentation:
-
-    https://apacheignite.readme.io/docs/aws-infrastructure-deployment
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
deleted file mode 100644
index 017b1b13b1eea..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Bootstrap script to spin up Cassandra cluster
-# -----------------------------------------------------------------------------------------------
-
-# URL to download AWS CLI tools
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-# URL to download JDK
-JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
-
-# URL to download the Ignite-Cassandra tests package - you should package and upload it to this location beforehand
-TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
-
-# Terminates script execution and uploads logs to S3
-terminate()
-{
-    SUCCESS_URL=$S3_CASSANDRA_BOOTSTRAP_SUCCESS
-    FAILURE_URL=$S3_CASSANDRA_BOOTSTRAP_FAILURE
-
-    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
-        SUCCESS_URL=${SUCCESS_URL}/
-    fi
-
-    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
-        FAILURE_URL=${FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Cassandra node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-
-        if [ -z "$FAILURE_URL" ]; then
-            exit 1
-        fi
-
-        reportFolder=${FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Cassandra node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-
-        if [ -z "$SUCCESS_URL" ]; then
-            exit 0
-        fi
-
-        reportFolder=${SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-# Downloads specified package
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    for i in 0 1 2 3 4 5 6 7 8 9;
-    do
-        if [[ "$1" == s3* ]]; then
-            aws s3 cp $1 $2
-            code=$?
-        else
-            curl "$1" -o "$2"
-            code=$?
-        fi
-
-        if [ $code -eq 0 ]; then
-            echo "[INFO] $3 package successfully downloaded from $1 into $2"
-            return 0
-        fi
-
-        echo "[WARN] Failed to download $3 package from $i attempt, sleeping extra 5sec"
-        sleep 5s
-    done
-
-    terminate "All 10 attempts to download $3 package from $1 are failed"
-}
-
-# Downloads and sets up JDK
-setupJava()
-{
-    rm -Rf /opt/java /opt/jdk.tar.gz
-
-    echo "[INFO] Downloading 'jdk'"
-    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
-    if [ $? -ne 0 ]; then
-        terminate "Failed to download 'jdk'"
-    fi
-
-    echo "[INFO] Untaring 'jdk'"
-    tar -xvzf /opt/jdk.tar.gz -C /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to untar 'jdk'"
-    fi
-
-    rm -Rf /opt/jdk.tar.gz
-
-    unzipDir=$(ls /opt | grep "jdk")
-    if [ "$unzipDir" != "java" ]; then
-        mv /opt/$unzipDir /opt/java
-    fi
-}
-
-# Downloads and sets up AWS CLI
-setupAWSCLI()
-{
-    echo "[INFO] Installing 'awscli'"
-    pip install --upgrade awscli
-    if [ $? -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -Rf /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-}
-
-# Sets up all the prerequisites (packages, settings, etc.)
-setupPreRequisites()
-{
-    echo "[INFO] Installing 'wget' package"
-    yum -y install wget
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'wget' package"
-    fi
-
-    echo "[INFO] Installing 'net-tools' package"
-    yum -y install net-tools
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'net-tools' package"
-    fi
-
-    echo "[INFO] Installing 'python' package"
-    yum -y install python
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'python' package"
-    fi
-
-    echo "[INFO] Installing 'unzip' package"
-    yum -y install unzip
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'unzip' package"
-    fi
-
-    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-    echo "[INFO] Installing 'pip'"
-    python /opt/get-pip.py
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'pip'"
-    fi
-}
-
-# Downloads and sets up the tests package
-setupTestsPackage()
-{
-    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
-
-    rm -Rf /opt/ignite-cassandra-tests
-
-    unzip /opt/ignite-cassandra-tests.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip tests package"
-    fi
-
-    rm -f /opt/ignite-cassandra-tests.zip
-
-    unzipDir=$(ls /opt | grep "ignite-cassandra")
-    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
-        mv /opt/$unzipDir /opt/ignite-cassandra-tests
-    fi
-
-    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
-
-    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "cassandra"
-
-    setupNTP
-
-    echo "[INFO] Starting logs collector daemon"
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_CASSANDRA_LOGS/$HOST_NAME" "/opt/cassandra/logs" "/opt/cassandra/cassandra-start.log" > /opt/logs-collector.log &
-
-    echo "[INFO] Logs collector daemon started: $!"
-
-    echo "----------------------------------------------------------------------------------------"
-    printInstanceInfo
-    echo "----------------------------------------------------------------------------------------"
-    tagInstance
-    bootstrapGangliaAgent "cassandra" 8641
-}
-
-# Downloads Cassandra package
-downloadCassandra()
-{
-    downloadPackage "$CASSANDRA_DOWNLOAD_URL" "/opt/apache-cassandra.tar.gz" "Cassandra"
-
-    rm -Rf /opt/cassandra
-
-    echo "[INFO] Untaring Cassandra package"
-    tar -xvzf /opt/apache-cassandra.tar.gz -C /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to untar Cassandra package"
-    fi
-
-    rm -f /opt/apache-cassandra.tar.gz
-
-    unzipDir=$(ls /opt | grep "cassandra" | grep "apache")
-    if [ "$unzipDir" != "cassandra" ]; then
-        mv /opt/$unzipDir /opt/cassandra
-    fi
-}
-
-# Sets up Cassandra
-setupCassandra()
-{
-    echo "[INFO] Creating 'cassandra' group"
-    exists=$(cat /etc/group | grep cassandra)
-    if [ -z "$exists" ]; then
-        groupadd cassandra
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create 'cassandra' group"
-        fi
-    fi
-
-    echo "[INFO] Creating 'cassandra' user"
-    exists=$(cat /etc/passwd | grep cassandra)
-    if [ -z "$exists" ]; then
-        useradd -g cassandra cassandra
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create 'cassandra' user"
-        fi
-    fi
-
-    rm -f /opt/cassandra/conf/cassandra-env.sh /opt/cassandra/conf/cassandra-template.yaml
-
-    cp /opt/ignite-cassandra-tests/bootstrap/aws/cassandra/cassandra-env.sh /opt/cassandra/conf
-    cp /opt/ignite-cassandra-tests/bootstrap/aws/cassandra/cassandra-template.yaml /opt/cassandra/conf
-
-    chown -R cassandra:cassandra /opt/cassandra /opt/ignite-cassandra-tests
-
-    createCassandraStorageLayout
-
-    cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_DATA_DIR\}/$CASSANDRA_DATA_DIR/g" > /opt/cassandra/conf/cassandra-template-1.yaml
-    cat /opt/cassandra/conf/cassandra-template-1.yaml | sed -r "s/\\\$\{CASSANDRA_COMMITLOG_DIR\}/$CASSANDRA_COMMITLOG_DIR/g" > /opt/cassandra/conf/cassandra-template-2.yaml
-    cat /opt/cassandra/conf/cassandra-template-2.yaml | sed -r "s/\\\$\{CASSANDRA_CACHES_DIR\}/$CASSANDRA_CACHES_DIR/g" > /opt/cassandra/conf/cassandra-template-3.yaml
-
-    rm -f /opt/cassandra/conf/cassandra-template.yaml /opt/cassandra/conf/cassandra-template-1.yaml /opt/cassandra/conf/cassandra-template-2.yaml
-    mv /opt/cassandra/conf/cassandra-template-3.yaml /opt/cassandra/conf/cassandra-template.yaml
-
-    echo "export JAVA_HOME=/opt/java" >> $1
-    echo "export CASSANDRA_HOME=/opt/cassandra" >> $1
-    echo "export PATH=\$JAVA_HOME/bin:\$CASSANDRA_HOME/bin:\$PATH" >> $1
-}
-
-###################################################################################################################
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Cassandra node"
-echo "[INFO]-----------------------------------------------------------------"
-
-setupPreRequisites
-setupJava
-setupAWSCLI
-setupTestsPackage
-downloadCassandra
-setupCassandra "/root/.bash_profile"
-
-cmd="/opt/ignite-cassandra-tests/bootstrap/aws/cassandra/cassandra-start.sh"
-
-#sudo -u cassandra -g cassandra sh -c "$cmd | tee /opt/cassandra/cassandra-start.log"
-
-$cmd | tee /opt/cassandra/cassandra-start.log
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh
deleted file mode 100644
index ba764018bfe0a..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-env.sh
+++ /dev/null
@@ -1,287 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Environment setup script from Cassandra distribution
-# -----------------------------------------------------------------------------------------------
-
-calculate_heap_sizes()
-{
-    case "`uname`" in
-        Linux)
-            system_memory_in_mb=`free -m | awk '/:/ {print $2;exit}'`
-            system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
-        ;;
-        FreeBSD)
-            system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
-            system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
-            system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
-        ;;
-        SunOS)
-            system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
-            system_cpu_cores=`psrinfo | wc -l`
-        ;;
-        Darwin)
-            system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
-            system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
-            system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
-        ;;
-        *)
-            # assume reasonable defaults for e.g. a modern desktop or
-            # cheap server
-            system_memory_in_mb="2048"
-            system_cpu_cores="2"
-        ;;
-    esac
-
-    # some systems like the raspberry pi don't report cores, use at least 1
-    if [ "$system_cpu_cores" -lt "1" ]
-    then
-        system_cpu_cores="1"
-    fi
-
-    # set max heap size based on the following
-    # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
-    # calculate 1/2 ram and cap to 1024MB
-    # calculate 1/4 ram and cap to 8192MB
-    # pick the max
-    half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
-    quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
-    if [ "$half_system_memory_in_mb" -gt "1024" ]
-    then
-        half_system_memory_in_mb="1024"
-    fi
-    if [ "$quarter_system_memory_in_mb" -gt "8192" ]
-    then
-        quarter_system_memory_in_mb="8192"
-    fi
-    if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ]
-    then
-        max_heap_size_in_mb="$half_system_memory_in_mb"
-    else
-        max_heap_size_in_mb="$quarter_system_memory_in_mb"
-    fi
-    MAX_HEAP_SIZE="${max_heap_size_in_mb}M"
-
-    # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
-    max_sensible_yg_per_core_in_mb="100"
-    max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
-
-    desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
-
-    if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
-    then
-        HEAP_NEWSIZE="${max_sensible_yg_in_mb}M"
-    else
-        HEAP_NEWSIZE="${desired_yg_in_mb}M"
-    fi
-}
-
-# Determine the sort of JVM we'll be running on.
-java_ver_output=`"${JAVA:-java}" -version 2>&1`
-jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}'`
-JVM_VERSION=${jvmver%_*}
-JVM_PATCH_VERSION=${jvmver#*_}
-
-if [ "$JVM_VERSION" \< "1.8" ] ; then
-    echo "Cassandra 3.0 and later require Java 8u40 or later."
-    exit 1;
-fi
-
-if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" \< "40" ] ; then
-    echo "Cassandra 3.0 and later require Java 8u40 or later."
-    exit 1;
-fi
-
-jvm=`echo "$java_ver_output" | grep -A 1 'java version' | awk 'NR==2 {print $1}'`
-case "$jvm" in
-    OpenJDK)
-        JVM_VENDOR=OpenJDK
-        # this will be "64-Bit" or "32-Bit"
-        JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
-        ;;
-    "Java(TM)")
-        JVM_VENDOR=Oracle
-        # this will be "64-Bit" or "32-Bit"
-        JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
-        ;;
-    *)
-        # Help fill in other JVM values
-        JVM_VENDOR=other
-        JVM_ARCH=unknown
-        ;;
-esac
-
-# Override these to set the amount of memory to allocate to the JVM at
-# start-up. For production use you may wish to adjust this for your
-# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
-# to the Java heap. HEAP_NEWSIZE refers to the size of the young
-# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
-# or not (if you set one, set the other).
-#
-# The main trade-off for the young generation is that the larger it
-# is, the longer GC pause times will be. The shorter it is, the more
-# expensive GC will be (usually).
-#
-# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
-# times. If in doubt, and if you do not particularly want to tweak, go with
-# 100 MB per physical CPU core.
-
-#MAX_HEAP_SIZE="4G"
-#HEAP_NEWSIZE="800M"
-
-# Set this to control the amount of arenas per-thread in glibc
-#export MALLOC_ARENA_MAX=4
-
-# only calculate the size if it's not set manually
-if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then
-    calculate_heap_sizes
-else
-    if [ "x$MAX_HEAP_SIZE" = "x" ] ||  [ "x$HEAP_NEWSIZE" = "x" ]; then
-        echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)"
-        exit 1
-    fi
-fi
-
-if [ "x$MALLOC_ARENA_MAX" = "x" ] ; then
-    export MALLOC_ARENA_MAX=4
-fi
-
-#GC log path has to be defined here because it needs to access CASSANDRA_HOME
-JVM_OPTS="$JVM_OPTS -Xloggc:${CASSANDRA_HOME}/logs/gc.log"
-
-# Here we create the arguments that will get passed to the jvm when
-# starting cassandra.
-
-# Read user-defined JVM options from jvm.options file
-JVM_OPTS_FILE=$CASSANDRA_CONF/jvm.options
-for opt in `grep "^-" $JVM_OPTS_FILE`
-do
-  JVM_OPTS="$JVM_OPTS $opt"
-done
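-
-# For illustration, a hypothetical jvm.options could contain lines such as:
-#   -Xms4G
-#   -Xmx4G
-#   -XX:+UseConcMarkSweepGC
-# and the loop above would append each of them verbatim to JVM_OPTS.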
-
-# Check which parameters were defined in the jvm.options file to avoid conflicts
-echo $JVM_OPTS | grep -q Xmn
-DEFINED_XMN=$?
-echo $JVM_OPTS | grep -q Xmx
-DEFINED_XMX=$?
-echo $JVM_OPTS | grep -q Xms
-DEFINED_XMS=$?
-echo $JVM_OPTS | grep -q UseConcMarkSweepGC
-USING_CMS=$?
-
-# We only set -Xms and -Xmx if they were not defined in the jvm.options file.
-# If defined, both Xmx and Xms should be defined together.
-if [ $DEFINED_XMX -ne 0 ] && [ $DEFINED_XMS -ne 0 ]; then
-     JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}"
-     JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}"
-elif [ $DEFINED_XMX -ne 0 ] || [ $DEFINED_XMS -ne 0 ]; then
-     echo "Please set or unset -Xmx and -Xms flags in pairs on jvm.options file."
-     exit 1
-fi
-
-# We only set the -Xmn flag if it was not defined in the jvm.options file
-# and if the CMS GC is being used.
-# If defined, both Xmn and Xmx should be defined together.
-if [ $DEFINED_XMN -eq 0 ] && [ $DEFINED_XMX -ne 0 ]; then
-    echo "Please set or unset -Xmx and -Xmn flags in pairs on jvm.options file."
-    exit 1
-elif [ $DEFINED_XMN -ne 0 ] && [ $USING_CMS -eq 0 ]; then
-    JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}"
-fi
-
-if [ "$JVM_ARCH" = "64-Bit" ] && [ $USING_CMS -eq 0 ]; then
-    JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark"
-fi
-
-# provides hints to the JIT compiler
-JVM_OPTS="$JVM_OPTS -XX:CompileCommandFile=$CASSANDRA_CONF/hotspot_compiler"
-
-# add the jamm javaagent
-JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.0.jar"
-
-# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
-if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
-    JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof"
-fi
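-
-# For example (the directory is illustrative only):
-#   export CASSANDRA_HEAPDUMP_DIR=/var/lib/cassandra/heapdumps
-# would make the JVM write heap dumps named cassandra-<epoch>-pid<pid>.hprof into that directory.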
-
-# jmx: metrics and administration interface
-#
-# add this if you're having trouble connecting:
-# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
-#
-# see
-# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in_jconsole
-# for more on configuring JMX through firewalls, etc. (Short version:
-# get it working with no firewall first.)
-#
-# Cassandra ships with JMX accessible *only* from localhost.  
-# To enable remote JMX connections, uncomment lines below
-# with authentication and/or ssl enabled. See https://wiki.apache.org/cassandra/JmxSecurity 
-#
-if [ "x$LOCAL_JMX" = "x" ]; then
-    LOCAL_JMX=yes
-fi
-
-# Specifies the default port over which Cassandra will be available for
-# JMX connections.
-# For security reasons, you should not expose this port to the internet.  Firewall it if needed.
-JMX_PORT="7199"
-
-if [ "$LOCAL_JMX" = "yes" ]; then
-#  JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.local.port=$JMX_PORT -XX:+DisableExplicitGC"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.local.only=false"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
-  JVM_OPTS="$JVM_OPTS -XX:+UnlockCommercialFeatures"
-  JVM_OPTS="$JVM_OPTS -XX:+FlightRecorder"
-  JVM_OPTS="$JVM_OPTS -XX:FlightRecorderOptions=defaultrecording=true"
-else
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=true"
-  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password"
-#  JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore"
-#  JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=<keystore-password>"
-#  JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/path/to/truststore"
-#  JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=<truststore-password>"
-#  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true"
-#  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true"
-#  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.protocols=<enabled-protocols>"
-#  JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.cipher.suites=<enabled-cipher-suites>"
-fi
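-
-# A minimal sketch of enabling remote JMX with this script (role name and password below
-# are placeholders, not values shipped anywhere):
-#   LOCAL_JMX=no
-#   echo "monitorRole changeMe" > /etc/cassandra/jmxremote.password
-#   chmod 400 /etc/cassandra/jmxremote.password
-# The else-branch above then enables password authentication on JMX port $JMX_PORT.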
-
-# To use mx4j, an HTML interface for JMX, add mx4j-tools.jar to the lib/
-# directory.
-# See http://wiki.apache.org/cassandra/Operations#Monitoring_with_MX4J
-# By default mx4j listens on 0.0.0.0:8081. Uncomment the following lines
-# to control its listen address and port.
-#MX4J_ADDRESS="-Dmx4jaddress=127.0.0.1"
-#MX4J_PORT="-Dmx4jport=8081"
-
-# Cassandra uses SIGAR to capture OS metrics CASSANDRA-7838
-# for SIGAR we have to set the java.library.path
-# to the location of the native libraries.
-JVM_OPTS="$JVM_OPTS -Djava.library.path=$CASSANDRA_HOME/lib/sigar-bin"
-
-JVM_OPTS="$JVM_OPTS $MX4J_ADDRESS"
-JVM_OPTS="$JVM_OPTS $MX4J_PORT"
-JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh
deleted file mode 100644
index 4a6daef6cef39..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-start.sh
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Script to start Cassandra daemon (used by cassandra-bootstrap.sh)
-# -----------------------------------------------------------------------------------------------
-
-#profile=/home/cassandra/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "cassandra"
-
-# Sets up Cassandra seeds for this EC2 node. Looks in S3 for information about
-# Cassandra cluster nodes that are already up and running
-setupCassandraSeeds()
-{
-    if [ "$FIRST_NODE_LOCK" == "true" ]; then
-        echo "[INFO] Setting up Cassandra seeds"
-
-        CLUSTER_SEEDS=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-        echo "[INFO] Using host address as a seed for the first Cassandra node: $CLUSTER_SEEDS"
-
-        aws s3 rm --recursive ${S3_CASSANDRA_NODES_DISCOVERY::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
-        fi
-    else
-        setupClusterSeeds "cassandra" "true"
-        CLUSTER_SEEDS=$(echo $CLUSTER_SEEDS | sed -r "s/ /,/g")
-    fi
-
-    cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CLUSTER_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml
-}
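-
-# For illustration, if CLUSTER_SEEDS resolves to a hypothetical value such as
-# "ip-10-0-1-12.ec2.internal,ip-10-0-1-13.ec2.internal", the sed command above turns the template line
-#     - seeds: "${CASSANDRA_SEEDS}"
-# into
-#     - seeds: "ip-10-0-1-12.ec2.internal,ip-10-0-1-13.ec2.internal"
-# when producing /opt/cassandra/conf/cassandra.yaml.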
-
-# Gracefully starts Cassandra daemon and waits until it joins Cassandra cluster
-startCassandra()
-{
-    echo "[INFO]-------------------------------------------------------------"
-    echo "[INFO] Trying attempt $START_ATTEMPT to start Cassandra daemon"
-    echo "[INFO]-------------------------------------------------------------"
-    echo ""
-
-    setupCassandraSeeds
-
-    waitToJoinCluster
-
-    if [ "$FIRST_NODE_LOCK" == "true" ]; then
-        aws s3 rm --recursive ${S3_CASSANDRA_NODES_DISCOVERY::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Cassandra node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
-        fi
-    fi
-
-    proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon")
-    proc=($proc)
-
-    if [ -n "${proc[1]}" ]; then
-        echo "[INFO] Terminating existing Cassandra process ${proc[1]}"
-        kill -9 ${proc[1]}
-    fi
-
-    echo "[INFO] Starting Cassandra"
-    rm -Rf /opt/cassandra/logs/* /storage/cassandra/*
-    /opt/cassandra/bin/cassandra -R &
-
-    echo "[INFO] Cassandra job id: $!"
-
-    sleep 1m
-
-    START_ATTEMPT=$(( $START_ATTEMPT+1 ))
-}
-
-#######################################################################################################
-
-START_ATTEMPT=0
-
-# Cleans all the previous metadata about this EC2 node
-unregisterNode
-
-# Tries to get first-node lock
-tryToGetFirstNodeLock
-
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE_LOCK" == "true" ]; then
-    echo "[INFO] Starting first Cassandra node"
-else
-    echo "[INFO] Starting Cassandra node"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-printInstanceInfo
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE_LOCK" != "true" ]; then
-    waitFirstClusterNodeRegistered "true"
-else
-    cleanupMetadata
-fi
-
-# Start Cassandra daemon
-startCassandra
-
-startTime=$(date +%s)
-
-# Trying multiple attempts to start Cassandra daemon
-while true; do
-    proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon")
-
-    /opt/cassandra/bin/nodetool status &> /dev/null
-
-    if [ $? -eq 0 ]; then
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Cassandra daemon successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        echo $proc
-        echo "[INFO]-----------------------------------------------------"
-
-        # Once the node has joined the cluster we need to remove the cluster-join lock
-        # to allow other EC2 nodes to acquire it and join the cluster sequentially
-        removeClusterJoinLock
-
-        break
-    fi
-
-    currentTime=$(date +%s)
-    duration=$(( $currentTime-$startTime ))
-    duration=$(( $duration/60 ))
-
-    if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
-        if [ "$FIRST_NODE_LOCK" == "true" ]; then
-            # If the first node of the Cassandra cluster failed to start the Cassandra daemon within SERVICE_STARTUP_TIME min,
-            # we will not make any further attempts and just terminate with an error. The terminate function itself will
-            # take care of removing all the locks held by this node.
-            terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first Cassandra daemon is still not up and running"
-        else
-            # If the node isn't the first node of the Cassandra cluster and it failed to start, we need to
-            # remove the cluster-join lock to allow other EC2 nodes to acquire it
-            removeClusterJoinLock
-
-            # If the node failed all SERVICE_START_ATTEMPTS attempts to start the Cassandra daemon, we will not
-            # try anymore and will terminate with an error
-            if [ $START_ATTEMPT -gt $SERVICE_START_ATTEMPTS ]; then
-                terminate "${SERVICE_START_ATTEMPTS} attempts exceed, but Cassandra daemon is still not up and running"
-            fi
-
-            # New attempt to start Cassandra daemon
-            startCassandra
-        fi
-
-        continue
-    fi
-
-    # Checking for the situation when two nodes are trying to join the Cassandra cluster simultaneously.
-    # This can actually happen only in a non-standard situation, when you are trying to start
-    # the Cassandra daemon on some EC2 nodes manually instead of using the bootstrap script.
-    concurrencyError=$(cat /opt/cassandra/logs/system.log | grep "java.lang.UnsupportedOperationException: Other bootstrapping/leaving/moving nodes detected, cannot bootstrap while cassandra.consistent.rangemovement is true")
-
-    if [ -n "$concurrencyError" ] && [ "$FIRST_NODE_LOCK" != "true" ]; then
-        # Remove cluster-join lock to allow other EC2 nodes to acquire it
-        removeClusterJoinLock
-
-        echo "[WARN] Failed to concurrently start Cassandra daemon. Sleeping for extra 30sec"
-        sleep 30s
-
-        # New attempt to start Cassandra daemon
-        startCassandra
-
-        continue
-    fi
-
-    # Handling the situation when the Cassandra daemon process terminated abnormally
-    if [ -z "$proc" ]; then
-        # If this is the first node of the Cassandra cluster, just terminate with an error
-        if [ "$FIRST_NODE_LOCK" == "true" ]; then
-            terminate "Failed to start Cassandra daemon"
-        fi
-
-        # Remove cluster-join lock to allow other EC2 nodes to acquire it
-        removeClusterJoinLock
-
-        echo "[WARN] Failed to start Cassandra daemon. Sleeping for extra 30sec"
-        sleep 30s
-
-        # New attempt to start Cassandra daemon
-        startCassandra
-
-        continue
-    fi
-
-    echo "[INFO] Waiting for Cassandra daemon to start, time passed ${duration}min"
-    sleep 30s
-done
-
-# Once the Cassandra daemon has successfully started, register the new Cassandra node in S3
-registerNode
-
-# Terminating script with zero exit code
-terminate
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml b/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
deleted file mode 100644
index e621886d64310..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/cassandra/cassandra-template.yaml
+++ /dev/null
@@ -1,888 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Cassandra storage config YAML
-
-# NOTE:
-#   See http://wiki.apache.org/cassandra/StorageConfiguration for
-#   full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: 'CassandraIgnite'
-
-# auto_bootstrap makes new (non-seed) nodes automatically migrate the right data to themselves.
-# When initializing a fresh cluster with no data, add auto_bootstrap: false
-auto_bootstrap: false
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting on the node's initial start;
-# on subsequent starts, this setting will apply even if initial_token is set.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to 
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: 256
-
-# initial_token allows you to specify tokens manually.  While you can use it with
-# vnodes (num_tokens > 1, above) -- in which case you should provide a
-# comma-separated list -- it's primarily used when adding nodes to legacy clusters
-# that do not have vnodes enabled.
-# initial_token:
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-# May either be "true" or "false" to enable globally, or contain a list
-# of data centers to enable per-datacenter.
-# hinted_handoff_enabled: DC1,DC2
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated.  After it has been dead this long, new hints for it will not be
-# created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
-# Maximum throttle in KBs per second, per delivery thread.  This will be
-# reduced proportionally to the number of nodes in the cluster.  (If there
-# are two nodes in the cluster, each delivery thread will use the maximum
-# rate; if there are three, each will throttle to half of the maximum,
-# since we expect two nodes to be delivering hints simultaneously.)
-hinted_handoff_throttle_in_kb: 1024
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# Maximum throttle in KBs per second, total. This will be
-# reduced proportionally to the number of nodes in the cluster.
-batchlog_replay_throttle_in_kb: 1024
-
-# Authentication backend, implementing IAuthenticator; used to identify users
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
-# PasswordAuthenticator}.
-#
-# - AllowAllAuthenticator performs no checks - set it to disable authentication.
-# - PasswordAuthenticator relies on username/password pairs to authenticate
-#   users. It keeps usernames and hashed passwords in system_auth.credentials table.
-#   Please increase system_auth keyspace replication factor if you use this authenticator.
-#   If using PasswordAuthenticator, CassandraRoleManager must also be used (see below)
-#authenticator: PasswordAuthenticator
-authenticator: AllowAllAuthenticator
-
-# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
-# CassandraAuthorizer}.
-#
-# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
-# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
-#   increase system_auth keyspace replication factor if you use this authorizer.
-#authorizer: CassandraAuthorizer
-authorizer: AllowAllAuthorizer
-
-# Part of the Authentication & Authorization backend, implementing IRoleManager; used
-# to maintain grants and memberships between roles.
-# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager,
-# which stores role information in the system_auth keyspace. Most functions of the
-# IRoleManager require an authenticated login, so unless the configured IAuthenticator
-# actually implements authentication, most of this functionality will be unavailable.
-#
-# - CassandraRoleManager stores role data in the system_auth keyspace. Please
-#   increase system_auth keyspace replication factor if you use this role manager.
-role_manager: CassandraRoleManager
-
-# Validity period for roles cache (fetching permissions can be an
-# expensive operation depending on the authorizer). Granted roles are cached for
-# authenticated sessions in AuthenticatedUser and after the period specified
-# here, become eligible for (async) reload.
-# Defaults to 2000, set to 0 to disable.
-# Will be disabled automatically for AllowAllAuthenticator.
-roles_validity_in_ms: 2000
-
-# Refresh interval for roles cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If roles_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as roles_validity_in_ms.
-# roles_update_interval_in_ms: 1000
-
-# Validity period for permissions cache (fetching permissions can be an
-# expensive operation depending on the authorizer, CassandraAuthorizer is
-# one example). Defaults to 2000, set to 0 to disable.
-# Will be disabled automatically for AllowAllAuthorizer.
-permissions_validity_in_ms: 2000
-
-# Refresh interval for permissions cache (if enabled).
-# After this interval, cache entries become eligible for refresh. Upon next
-# access, an async reload is scheduled and the old value returned until it
-# completes. If permissions_validity_in_ms is non-zero, then this must be
-# also.
-# Defaults to the same value as permissions_validity_in_ms.
-# permissions_update_interval_in_ms: 1000
-
-# The partitioner is responsible for distributing groups of rows (by
-# partition key) across nodes in the cluster.  You should leave this
-# alone for new clusters.  The partitioner can NOT be changed without
-# reloading all data, so when upgrading you should set this to the
-# same partitioner you were already using.
-#
-# Besides Murmur3Partitioner, partitioners included for backwards
-# compatibility include RandomPartitioner, ByteOrderedPartitioner, and
-# OrderPreservingPartitioner.
-#
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# Directories where Cassandra should store data on disk.  Cassandra
-# will spread data evenly across them, subject to the granularity of
-# the configured compaction strategy.
-# If not set, the default directory is $CASSANDRA_HOME/data/data.
-data_file_directories: ${CASSANDRA_DATA_DIR}
-
-# commit log.  when running on magnetic HDD, this should be a
-# separate spindle than the data directories.
-# If not set, the default directory is $CASSANDRA_HOME/data/commitlog.
-commitlog_directory: ${CASSANDRA_COMMITLOG_DIR}
-
-# policy for data disk failures:
-# die: shut down gossip and client transports and kill the JVM for any fs errors or
-#      single-sstable errors, so the node can be replaced.
-# stop_paranoid: shut down gossip and client transports even for single-sstable errors,
-#                kill the JVM for errors during startup.
-# stop: shut down gossip and client transports, leaving the node effectively dead, but
-#       can still be inspected via JMX, kill the JVM for errors during startup.
-# best_effort: stop using the failed disk and respond to requests based on
-#              remaining available sstables.  This means you WILL see obsolete
-#              data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# policy for commit disk failures:
-# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-#       can still be inspected via JMX.
-# stop_commit: shutdown the commit log, letting writes collect but
-#              continuing to service reads, as in pre-2.0.5 Cassandra
-# ignore: ignore fatal errors and let the batches fail
-commit_failure_policy: stop
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must contain the entire row,
-# so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
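-
-# Worked example of the "auto" formula above (hypothetical 4096 MB heap): 5% of 4096 MB is
-# roughly 205 MB, which is capped at 100 MB, so the key cache would get 100 MB.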
-
-# Duration in seconds after which Cassandra should
-# save the key cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Row cache implementation class name.
-# Available implementations:
-#   org.apache.cassandra.cache.OHCProvider                Fully off-heap row cache implementation (default).
-#   org.apache.cassandra.cache.SerializingCacheProvider   This is the row cache implementation available
-#                                                         in previous releases of Cassandra.
-# row_cache_class_name: org.apache.cassandra.cache.OHCProvider
-
-# Maximum size of the row cache in memory.
-# Please note that OHC cache implementation requires some additional off-heap memory to manage
-# the map structures and some in-flight memory during operations before/after cache entries can be
-# accounted against the cache capacity. This overhead is usually small compared to the whole capacity.
-# Do not specify more memory than the system can afford in the worst usual situation, and leave some
-# headroom for the OS block-level cache. Never allow your system to swap.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should save the row cache.
-# Caches are saved to saved_caches_directory as specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save.
-# Specify 0 (which is the default), meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# Maximum size of the counter cache in memory.
-#
-# Counter cache helps to reduce counter locks' contention for hot counter cells.
-# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
-# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
-# of the lock hold, helping with hot counter cell updates, but will not allow skipping
-# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
-# in memory, not the whole counter, so it's relatively cheap.
-#
-# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
-# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
-counter_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# save the counter cache (keys only). Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Default is 7200 or 2 hours.
-counter_cache_save_period: 7200
-
-# Number of keys from the counter cache to save
-# Disabled by default, meaning all keys are going to be saved
-# counter_cache_keys_to_save: 100
-
-# The off-heap memory allocator.  Affects storage engine metadata as
-# well as caches.  Experiments show that JEMalloc saves some memory
-# compared to the native GCC allocator (i.e., JEMalloc is more
-# fragmentation-resistant).
-# 
-# Supported values are: NativeAllocator, JEMallocAllocator
-#
-# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
-# modify cassandra-env.sh as directed in the file.
-#
-# Defaults to NativeAllocator
-# memory_allocator: NativeAllocator
-
-# saved caches
-# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
-saved_caches_directory: ${CASSANDRA_CACHES_DIR}
-
-# commitlog_sync may be either "periodic" or "batch." 
-# 
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk.  It will wait
-# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
-# This window should be kept short because the writer threads will
-# be unable to do extra work while waiting.  (You may need to increase
-# concurrent_writes for the same reason.)
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 2
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds. 
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments.  A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentially from each columnfamily in the system) has been
-# flushed to sstables.  
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# Compression to apply to the commit log. If omitted, the commit log
-# will be written uncompressed.  LZ4, Snappy, and Deflate compressors
-# are supported.
-#commitlog_compression:
-#   - class_name: LZ4Compressor
-#     parameters:
-#         -
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
-    # Addresses of hosts that are deemed contact points. 
-    # Cassandra nodes use this list of hosts to find each other and learn
-    # the topology of the ring.  You must change this if you are running
-    # multiple nodes!
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          # seeds is actually a comma-delimited list of addresses.
-          # Ex: "<ip1>,<ip2>,<ip3>"
-          - seeds: "${CASSANDRA_SEEDS}"
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them. Same applies to
-# "concurrent_counter_writes", since counter writes read the current
-# values before incrementing and writing them back.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-concurrent_counter_writes: 32
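-
-# Worked example of the guidance above (hypothetical hardware, not a recommendation):
-# with 2 data drives and 4 cores, concurrent_reads = 16 * 2 = 32 and
-# concurrent_writes = 8 * 4 = 32, which matches the values used here.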
-
-# Total memory to use for sstable-reading buffers.  Defaults to
-# the smaller of 1/4 of heap or 512MB.
-# file_cache_size_in_mb: 512
-
-# Total permitted memory to use for memtables. Cassandra will stop 
-# accepting writes when the limit is exceeded until a flush completes,
-# and will trigger a flush based on memtable_cleanup_threshold
-# If omitted, Cassandra will set both to 1/4 the size of the heap.
-# memtable_heap_space_in_mb: 2048
-# memtable_offheap_space_in_mb: 2048
-
-# Ratio of occupied non-flushing memtable size to total permitted size
-# that will trigger a flush of the largest memtable.  A larger memtable_cleanup_threshold will
-# mean larger flushes and hence less compaction, but also less concurrent
-# flush activity which can make it difficult to keep your disks fed
-# under heavy write load.
-#
-# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
-# memtable_cleanup_threshold: 0.11
-
-# Specify the way Cassandra allocates and manages memtable memory.
-# Options are:
-#   heap_buffers:    on heap nio buffers
-#   offheap_buffers: off heap (direct) nio buffers
-#   offheap_objects: native memory, eliminating nio buffer heap overhead
-memtable_allocation_type: heap_buffers
-
-# Total space to use for commit logs on disk.
-#
-# If space gets above this value, Cassandra will flush every dirty CF
-# in the oldest segment and remove it.  So a small total commitlog space
-# will tend to cause more flush activity on less-active columnfamilies.
-#
-# The default value is 8192.
-# commitlog_total_space_in_mb: 8192
-
-# This sets the amount of memtable flush writer threads.  These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. 
-#
-# memtable_flush_writers defaults to the smaller of (number of disks,
-# number of cores), with a minimum of 2 and a maximum of 8.
-# 
-# If your data directories are backed by SSD, you should increase this
-# to the number of cores.
-#memtable_flush_writers: 8
-
-# A fixed memory pool size in MB for SSTable index summaries. If left
-# empty, this will default to 5% of the heap size. If the memory usage of
-# all index summaries exceeds this limit, SSTables with low read rates will
-# shrink their index summaries in order to meet this limit.  However, this
-# is a best-effort process. In extreme conditions Cassandra may need to use
-# more than this amount of memory.
-index_summary_capacity_in_mb:
-
-# How frequently index summaries should be resampled.  This is done
-# periodically to redistribute memory from the fixed-size pool to sstables
-# proportional to their recent read rates.  Setting to -1 will disable this
-# process, leaving existing index summaries at their current sampling level.
-index_summary_resize_interval_in_minutes: 60
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSDs; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-# For security reasons, you should not expose this port to the internet.  Firewall it if needed.
-storage_port: 7000
-
-# SSL port, for encrypted communication.  Unused unless enabled in
-# encryption_options
-# For security reasons, you should not expose this port to the internet.  Firewall it if needed.
-ssl_storage_port: 7001
-
-# Address or interface to bind to and tell other Cassandra nodes to connect to.
-# You _must_ change this if you want multiple nodes to be able to communicate!
-#
-# Set listen_address OR listen_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
-#
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing _if_ the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting listen_address to 0.0.0.0 is always wrong.
-#
-# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
-# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
-# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
-# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-listen_address: 
-# listen_interface: eth0
-# listen_interface_prefer_ipv6: false
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-# broadcast_address: 1.2.3.4
-
-# Internode authentication backend, implementing IInternodeAuthenticator;
-# used to allow/disallow connections from peer nodes.
-# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
-
-# Whether to start the native transport server.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-# For security reasons, you should not expose this port to the internet.  Firewall it if needed.
-native_transport_port: 9042
-# The maximum threads for handling requests when the native transport is used.
-# This is similar to rpc_max_threads though the default differs slightly (and
-# there is no native_transport_min_threads, idle threads will always be stopped
-# after 30 seconds).
-# native_transport_max_threads: 128
-#
-# The maximum size of allowed frame. Frame (requests) larger than this will
-# be rejected as invalid. The default is 256MB.
-# native_transport_max_frame_size_in_mb: 256
-
-# The maximum number of concurrent client connections.
-# The default is -1, which means unlimited.
-# native_transport_max_concurrent_connections: -1
-
-# The maximum number of concurrent client connections per source ip.
-# The default is -1, which means unlimited.
-# native_transport_max_concurrent_connections_per_ip: -1
-
-# Whether to start the thrift rpc server.
-start_rpc: true
-
-# The address or interface to bind the Thrift RPC service and native transport
-# server to.
-#
-# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
-# to a single address, IP aliasing is not supported.
-#
-# Leaving rpc_address blank has the same effect as on listen_address
-# (i.e. it will be based on the configured hostname of the node).
-#
-# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
-# set broadcast_rpc_address to a value other than 0.0.0.0.
-#
-# For security reasons, you should not expose this port to the internet.  Firewall it if needed.
-#
-# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
-# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
-# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
-# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
-rpc_address: 
-# rpc_interface: eth1
-# rpc_interface_prefer_ipv6: false
-
-# port for Thrift to listen for clients on
-rpc_port: 9160
-
-# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
-# be set to 0.0.0.0. If left blank, this will be set to the value of
-# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
-# be set.
-broadcast_rpc_address: 
-
-# enable or disable keepalive on rpc/native connections
-rpc_keepalive: true
-
-# Cassandra provides two out-of-the-box options for the RPC Server:
-#
-# sync  -> One thread per thrift connection. For a very large number of clients, memory
-#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
-#          per thread, and that will correspond to your use of virtual memory (but physical memory
-#          may be limited depending on use of stack space).
-#
-# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-#          asynchronously using a small number of threads that does not vary with the number
-#          of thrift clients (and thus scales well to many clients). The rpc requests are still
-#          synchronous (one thread per active request). If hsha is selected then it is essential
-#          that rpc_max_threads is changed from the default value of unlimited.
-#
-# The default is sync because on Windows hsha is about 30% slower.  On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Uncomment to set socket buffer size for internode communication
-# Note that when setting this, the buffer size is limited by net.core.wmem_max
-# and when not setting it it is defined by net.ipv4.tcp_wmem
-# See:
-# /proc/sys/net/core/wmem_max
-# /proc/sys/net/core/rmem_max
-# /proc/sys/net/ipv4/tcp_wmem
-# /proc/sys/net/ipv4/tcp_wmem
-# and: man tcp
-# internode_send_buff_size_in_bytes:
-# internode_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum message length).
-thrift_framed_transport_size_in_mb: 15
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# keyspace data.  Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction.  Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you.  Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true 
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# When executing a scan, within or across a partition, we need to keep the
-# tombstones seen in memory so we can return them to the coordinator, which
-# will use them to make sure other replicas also know about the deleted rows.
-# With workloads that generate a lot of tombstones, this can cause performance
-# problems and even exhaust the server heap.
-# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
-# Adjust the thresholds here if you understand the dangers and want to
-# scan more tombstones anyway.  These thresholds may also be adjusted at runtime
-# using the StorageService mbean.
-tombstone_warn_threshold: 1000
-tombstone_failure_threshold: 100000
-
-# Granularity of the collation index of rows within a partition.
-# Increase if your rows are large, or if you have a very large
-# number of rows per partition.  The competing goals are these:
-#   1) a smaller granularity means more index entries are generated
-#      and looking up rows within the partition by collation column
-#      is faster
-#   2) but, Cassandra will keep the collation index in memory for hot
-#      rows (as part of the key cache), so a larger granularity means
-#      you can cache more hot rows
-column_index_size_in_kb: 64
-
-
-# Log WARN on any batch size exceeding this value. 5kb per batch by default.
-# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
-batch_size_warn_threshold_in_kb: 5
-
-# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
-batch_size_fail_threshold_in_kb: 50
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair.  Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the smaller of (number of disks,
-# number of cores), with a minimum of 2 and a maximum of 8.
-# 
-# If your data directories are backed by SSD, you should increase this
-# to the number of cores.
-#concurrent_compactors: 1
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this accounts for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
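-
-# Worked example of the guidance above (hypothetical ingest rate): when inserting data at
-# roughly 1 MB/s, a throttle of 16-32 MB/s is more than sufficient, so the value of 16
-# used here is usually left unchanged.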
-
-# Log a warning when compacting partitions larger than this value
-compaction_large_partition_warning_threshold_mb: 100
-
-# When compacting, the replacement sstable(s) can be opened before they
-# are completely written, and used in place of the prior sstables for
-# any range that has been written. This helps to smoothly transfer reads 
-# between the sstables, reducing page cache churn and keeping hot rows hot
-sstable_preemptive_open_interval_in_mb: 50
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 200 Mbps or 25 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 200
-
-# Throttles all streaming file transfer between the datacenters,
-# this setting allows users to throttle inter dc stream throughput in addition
-# to throttling all network stream traffic as configured with
-# stream_throughput_outbound_megabits_per_sec
-# inter_dc_stream_throughput_outbound_megabits_per_sec:
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 50000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 20000
-# How long the coordinator should wait for counter writes to complete
-counter_write_request_timeout_in_ms: 5000
-# How long a coordinator should continue to retry a CAS operation
-# that contends with other proposals for the same row
-cas_contention_timeout_in_ms: 1000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because unless auto_snapshot is disabled
-# we need to flush first so we can snapshot before removing the data.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts.  If disabled, replicas will assume that requests
-# were forwarded to them instantly by the coordinator, which means that
-# under overload conditions we will waste that much extra time processing 
-# already-timed-out requests.
-#
-# Warning: before enabling this property make sure NTP is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This _can_ involve re-streaming an important amount of
-# data, so you should avoid setting the value too low.
-# Default value is 3600000, which means streams timeout after an hour.
-# streaming_socket_timeout_in_ms: 3600000
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch.  The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-#   requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-#   correlated failures. It does this by grouping machines into
-#   "datacenters" and "racks."  Cassandra will do its best not to have
-#   more than one replica on the same "rack" (which may not actually
-#   be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-#  - SimpleSnitch:
-#    Treats Strategy order as proximity. This can improve cache
-#    locality when disabling read repair.  Only appropriate for
-#    single-datacenter deployments.
-#  - GossipingPropertyFileSnitch
-#    This should be your go-to snitch for production use.  The rack
-#    and datacenter for the local node are defined in
-#    cassandra-rackdc.properties and propagated to other nodes via
-#    gossip.  If cassandra-topology.properties exists, it is used as a
-#    fallback, allowing migration from the PropertyFileSnitch.
-#  - PropertyFileSnitch:
-#    Proximity is determined by rack and data center, which are
-#    explicitly configured in cassandra-topology.properties.
-#  - Ec2Snitch:
-#    Appropriate for EC2 deployments in a single Region. Loads Region
-#    and Availability Zone information from the EC2 API. The Region is
-#    treated as the datacenter, and the Availability Zone as the rack.
-#    Only private IPs are used, so this will not work across multiple
-#    Regions.
-#  - Ec2MultiRegionSnitch:
-#    Uses public IPs as broadcast_address to allow cross-region
-#    connectivity.  (Thus, you should set seed addresses to the public
-#    IP as well.) You will need to open the storage_port or
-#    ssl_storage_port on the public IP firewall.  (For intra-Region
-#    traffic, Cassandra will switch to the private IP after
-#    establishing a connection.)
-#  - RackInferringSnitch:
-#    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's IP
-#    address, respectively.  Unless this happens to match your
-#    deployment conventions, this is best used as an example of
-#    writing a custom Snitch class and is provided in that spirit.
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: Ec2Snitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100 
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it.  This is
-# expressed as a double which represents a percentage.  Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-#  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client.  Requests beyond 
-#                      that limit are queued up until
-#                      running requests can complete.
-#                      The value of 80 here is twice the number of
-#                      concurrent_reads + concurrent_writes.
-#  - default_weight -- default_weight is optional and allows for
-#                      overriding the default which is 1.
-#  - weights -- Weights are optional and will default to 1 or the
-#               overridden default_weight. The weight translates into how
-#               many requests are handled during each turn of the
-#               RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-#    throttle_limit: 80
-#    default_weight: 5
-#    weights:
-#      Keyspace1: 1
-#      Keyspace2: 5
-
-# request_scheduler_id -- An identifier based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore.  For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
-#
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-    # require_client_auth: false
-
-# enable or disable client/server encryption.
-client_encryption_options:
-    enabled: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    # require_client_auth: false
-    # Set truststore and truststore_password if require_client_auth is true
-    # truststore: conf/.truststore
-    # truststore_password: cassandra
-    # More advanced defaults below:
-    # protocol: TLS
-    # algorithm: SunX509
-    # store_type: JKS
-    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# can be:  all  - all traffic is compressed
-#          dc   - traffic between different datacenters is compressed
-#          none - nothing is compressed.
-internode_compression: all
-
-# Enable or disable tcp_nodelay for inter-dc communication.
-# Disabling it will result in larger (but fewer) network packets being sent,
-# reducing overhead from the TCP protocol itself, at the cost of increasing
-# latency if you block for cross-datacenter responses.
-inter_dc_tcp_nodelay: false
-
-# TTL for different trace types used during logging of the repair process.
-tracetype_query_ttl: 86400
-tracetype_repair_ttl: 604800
-
-# UDFs (user defined functions) are disabled by default.
-# As of Cassandra 2.2, there is no security manager or anything else in place that
-# prevents execution of evil code. CASSANDRA-9402 will fix this issue for Cassandra 3.0.
-# This will inherently be backwards-incompatible with any 2.2 UDF that performs insecure
-# operations such as opening a socket or writing to the filesystem.
-enable_user_defined_functions: false
-
-# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
-# Lowering this value on Windows can provide much tighter latency and better throughput, however
-# some virtualized environments may see a negative performance impact from changing this setting
-# below their system default. The sysinternals 'clockres' tool can confirm your system's default
-# setting.
-windows_timer_interval: 1
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/common.sh b/modules/cassandra/store/src/test/bootstrap/aws/common.sh
deleted file mode 100644
index 6469e951c3618..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/common.sh
+++ /dev/null
@@ -1,1481 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Common purpose functions used by bootstrap scripts
-# -----------------------------------------------------------------------------------------------
-
-# Validates values of the main environment variables specified in env.sh
-validate()
-{
-    if [ -n "$TESTS_TYPE" ] && [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
-        terminate "Incorrect tests type specified: $TESTS_TYPE"
-    fi
-
-    if [ -z "$S3_TESTS_NODES_DISCOVERY" ]; then
-        terminate "Tests discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_NODES_DISCOVERY" != */ ]]; then
-        S3_TESTS_NODES_DISCOVERY=${S3_TESTS_NODES_DISCOVERY}/
-    fi
-
-    if [ -z "$S3_TESTS_SUCCESS" ]; then
-        terminate "Tests success URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_SUCCESS" != */ ]]; then
-        S3_TESTS_SUCCESS=${S3_TESTS_SUCCESS}/
-    fi
-
-    if [ -z "$S3_TESTS_FAILURE" ]; then
-        terminate "Tests failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_FAILURE" != */ ]]; then
-        S3_TESTS_FAILURE=${S3_TESTS_FAILURE}/
-    fi
-
-    if [ -z "$S3_TESTS_IDLE" ]; then
-        terminate "Tests idle URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_IDLE" != */ ]]; then
-        S3_TESTS_IDLE=${S3_TESTS_IDLE}/
-    fi
-
-    if [ -z "$S3_TESTS_PREPARING" ]; then
-        terminate "Tests preparing URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_PREPARING" != */ ]]; then
-        S3_TESTS_PREPARING=${S3_TESTS_PREPARING}/
-    fi
-
-    if [ -z "$S3_TESTS_RUNNING" ]; then
-        terminate "Tests running URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_RUNNING" != */ ]]; then
-        S3_TESTS_RUNNING=${S3_TESTS_RUNNING}/
-    fi
-
-    if [ -z "$S3_TESTS_WAITING" ]; then
-        terminate "Tests waiting URL doesn't specified"
-    fi
-
-    if [[ "$S3_TESTS_WAITING" != */ ]]; then
-        S3_TESTS_WAITING=${S3_TESTS_WAITING}/
-    fi
-
-    if [ -z "$S3_IGNITE_NODES_DISCOVERY" ]; then
-        terminate "Ignite discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_NODES_DISCOVERY" != */ ]]; then
-        S3_IGNITE_NODES_DISCOVERY=${S3_IGNITE_NODES_DISCOVERY}/
-    fi
-
-    if [ -z "$S3_IGNITE_BOOTSTRAP_SUCCESS" ]; then
-        terminate "Ignite success URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_BOOTSTRAP_SUCCESS" != */ ]]; then
-        S3_IGNITE_BOOTSTRAP_SUCCESS=${S3_IGNITE_BOOTSTRAP_SUCCESS}/
-    fi
-
-    if [ -z "$S3_IGNITE_BOOTSTRAP_FAILURE" ]; then
-        terminate "Ignite failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_IGNITE_BOOTSTRAP_FAILURE" != */ ]]; then
-        S3_IGNITE_BOOTSTRAP_FAILURE=${S3_IGNITE_BOOTSTRAP_FAILURE}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_NODES_DISCOVERY" ]; then
-        terminate "Cassandra discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_NODES_DISCOVERY" != */ ]]; then
-        S3_CASSANDRA_NODES_DISCOVERY=${S3_CASSANDRA_NODES_DISCOVERY}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_BOOTSTRAP_SUCCESS" ]; then
-        terminate "Cassandra success URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_BOOTSTRAP_SUCCESS" != */ ]]; then
-        S3_CASSANDRA_BOOTSTRAP_SUCCESS=${S3_CASSANDRA_BOOTSTRAP_SUCCESS}/
-    fi
-
-    if [ -z "$S3_CASSANDRA_BOOTSTRAP_FAILURE" ]; then
-        terminate "Cassandra failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_CASSANDRA_BOOTSTRAP_FAILURE" != */ ]]; then
-        S3_CASSANDRA_BOOTSTRAP_FAILURE=${S3_CASSANDRA_BOOTSTRAP_FAILURE}/
-    fi
-
-    if [ -z "$S3_GANGLIA_MASTER_DISCOVERY" ]; then
-        terminate "Ganglia master discovery URL doesn't specified"
-    fi
-
-    if [[ "$S3_GANGLIA_MASTER_DISCOVERY" != */ ]]; then
-        S3_GANGLIA_MASTER_DISCOVERY=${S3_GANGLIA_MASTER_DISCOVERY}/
-    fi
-
-    if [ -z "$S3_GANGLIA_BOOTSTRAP_SUCCESS" ]; then
-        terminate "Ganglia master success URL doesn't specified"
-    fi
-
-    if [[ "$S3_GANGLIA_BOOTSTRAP_SUCCESS" != */ ]]; then
-        S3_GANGLIA_BOOTSTRAP_SUCCESS=${S3_GANGLIA_BOOTSTRAP_SUCCESS}/
-    fi
-
-    if [ -z "$S3_GANGLIA_BOOTSTRAP_FAILURE" ]; then
-        terminate "Ganglia master failure URL doesn't specified"
-    fi
-
-    if [[ "$S3_GANGLIA_BOOTSTRAP_FAILURE" != */ ]]; then
-        S3_GANGLIA_BOOTSTRAP_FAILURE=${S3_GANGLIA_BOOTSTRAP_FAILURE}/
-    fi
-}
-
-# Prints EC2 instance info
-printInstanceInfo()
-{
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "[INFO] Cassandra download URL: $CASSANDRA_DOWNLOAD_URL"
-        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
-        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
-        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
-        echo "[INFO] Logs URL: $S3_CASSANDRA_LOGS"
-        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
-        echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
-        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
-        echo "[INFO] Cassandra first node lock URL: $S3_CASSANDRA_FIRST_NODE_LOCK"
-        echo "[INFO] Cassandra nodes join lock URL: $S3_CASSANDRA_NODES_JOIN_LOCK"
-        echo "[INFO] Cassandra success URL: $S3_CASSANDRA_BOOTSTRAP_SUCCESS"
-        echo "[INFO] Cassandra failure URL: $S3_CASSANDRA_BOOTSTRAP_FAILURE"
-    fi
-
-    if [ "$NODE_TYPE" == "ignite" ]; then
-        echo "[INFO] Ignite download URL: $IGNITE_DOWNLOAD_URL"
-        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
-        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
-        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
-        echo "[INFO] Logs URL: $S3_IGNITE_LOGS"
-        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
-        echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
-        echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
-        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
-        echo "[INFO] Ignite first node lock URL: $S3_IGNITE_FIRST_NODE_LOCK"
-        echo "[INFO] Ignite nodes join lock URL: $S3_IGNITE_NODES_JOIN_LOCK"
-        echo "[INFO] Ignite success URL: $S3_IGNITE_BOOTSTRAP_SUCCESS"
-        echo "[INFO] Ignite failure URL: $S3_IGNITE_BOOTSTRAP_FAILURE"
-    fi
-
-    if [ "$NODE_TYPE" == "test" ]; then
-        echo "[INFO] Tests type: $TESTS_TYPE"
-        echo "[INFO] Test nodes count: $TEST_NODES_COUNT"
-        echo "[INFO] Ignite nodes count: $IGNITE_NODES_COUNT"
-        echo "[INFO] Cassandra nodes count: $CASSANDRA_NODES_COUNT"
-        echo "[INFO] Tests summary URL: $S3_TESTS_SUMMARY"
-        echo "[INFO] ----------------------------------------------------"
-        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
-        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
-        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
-        echo "[INFO] Logs URL: $S3_TESTS_LOGS"
-        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
-        echo "[INFO] Test node discovery URL: $S3_TESTS_NODES_DISCOVERY"
-        echo "[INFO] Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
-        echo "[INFO] Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY"
-        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
-        echo "[INFO] Tests trigger URL: $S3_TESTS_TRIGGER"
-        echo "[INFO] Tests idle URL: $S3_TESTS_IDLE"
-        echo "[INFO] Tests preparing URL: $S3_TESTS_PREPARING"
-        echo "[INFO] Tests waiting URL: $S3_TESTS_WAITING"
-        echo "[INFO] Tests running URL: $S3_TESTS_RUNNING"
-        echo "[INFO] Tests success URL: $S3_TESTS_SUCCESS"
-        echo "[INFO] Tests failure URL: $S3_TESTS_FAILURE"
-        echo "[INFO] Ignite success URL: $S3_IGNITE_BOOTSTRAP_SUCCESS"
-        echo "[INFO] Ignite failure URL: $S3_IGNITE_BOOTSTRAP_FAILURE"
-        echo "[INFO] Cassandra success URL: $S3_CASSANDRA_BOOTSTRAP_SUCCESS"
-        echo "[INFO] Cassandra failure URL: $S3_CASSANDRA_BOOTSTRAP_FAILURE"
-    fi
-
-    if [ "$NODE_TYPE" == "ganglia" ]; then
-        echo "[INFO] Ganglia Core download URL: $GANGLIA_CORE_DOWNLOAD_URL"
-        echo "[INFO] Ganglia Web download URL: $GANGLIA_WEB_DOWNLOAD_URL"
-        echo "[INFO] RRD download URL: $RRD_DOWNLOAD_URL"
-        echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL"
-        echo "[INFO] Logs URL: $S3_GANGLIA_LOGS"
-        echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER"
-        echo "[INFO] Ganglia master discovery URL: $S3_GANGLIA_MASTER_DISCOVERY"
-        echo "[INFO] Ganglia success URL: $S3_GANGLIA_BOOTSTRAP_SUCCESS"
-        echo "[INFO] Ganglia failure URL: $S3_GANGLIA_BOOTSTRAP_FAILURE"
-    fi
-}
-
-# Clone git repository
-gitClone()
-{
-    echo "[INFO] Cloning git repository $1 to $2"
-
-    rm -Rf $2
-
-    for i in 0 1 2 3 4 5 6 7 8 9;
-    do
-        git clone $1 $2
-        code=$?
-
-        if [ $code -eq 0 ]; then
-            echo "[INFO] Git repository $1 was successfully cloned to $2"
-            return 0
-        fi
-
-        echo "[WARN] Failed to clone git repository $1 from $i attempt, sleeping extra 5sec"
-        rm -Rf $2
-        sleep 5s
-    done
-
-    terminate "All 10 attempts to clone git repository $1 are failed"
-}
-
-# Applies specified tag to EC2 instance
-createTag()
-{
-    if [ -z "$EC2_INSTANCE_REGION" ]; then
-        EC2_AVAIL_ZONE=`curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone`
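-        # The region is the availability zone with its trailing letter stripped (e.g. us-east-1a -> us-east-1)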
-        EC2_INSTANCE_REGION="`echo \"$EC2_AVAIL_ZONE\" | sed -e 's:\([0-9][0-9]*\)[a-z]*\$:\\1:'`"
-        export EC2_INSTANCE_REGION
-        echo "[INFO] EC2 instance region: $EC2_INSTANCE_REGION"
-    fi
-
-    for i in 0 1 2 3 4 5 6 7 8 9;
-    do
-        aws ec2 create-tags --resources $1 --tags Key=$2,Value=$3 --region $EC2_INSTANCE_REGION
-        if [ $? -eq 0 ]; then
-            return 0
-        fi
-
-        echo "[WARN] $i attempt to tag EC2 instance $1 with $2=$3 is failed, sleeping extra 5sec"
-        sleep 5s
-    done
-
-    terminate "All 10 attempts to tag EC2 instance $1 with $2=$3 are failed"
-}
-
-# Applies 'owner', 'project' and 'Name' tags to EC2 instance
-tagInstance()
-{
-    export EC2_HOME=/opt/aws/apitools/ec2
-    export JAVA_HOME=/opt/java
-    export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH
-
-    INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to get instance metadata to tag it"
-        exit 1
-    fi
-
-    INSTANCE_NAME=
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        INSTANCE_NAME=$EC2_CASSANDRA_TAG
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        INSTANCE_NAME=$EC2_IGNITE_TAG
-    elif [ "$NODE_TYPE" == "test" ]; then
-        INSTANCE_NAME=$EC2_TEST_TAG
-    elif [ "$NODE_TYPE" == "ganglia" ]; then
-        INSTANCE_NAME=$EC2_GANGLIA_TAG
-    fi
-
-    if [ -n "$INSTANCE_NAME" ]; then
-        createTag "$INSTANCE_ID" "Name" "${INSTANCE_NAME}"
-    fi
-
-    if [ -n "$EC2_OWNER_TAG" ]; then
-        createTag "$INSTANCE_ID" "owner" "${EC2_OWNER_TAG}"
-    fi
-
-    if [ -n "$EC2_PROJECT_TAG" ]; then
-        createTag "$INSTANCE_ID" "project" "${EC2_PROJECT_TAG}"
-    fi
-}
-
-# Sets NODE_TYPE env variable
-setNodeType()
-{
-    if [ -n "$1" ]; then
-        NEW_NODE_TYPE=$NODE_TYPE
-        NODE_TYPE=$1
-    else
-        NEW_NODE_TYPE=
-    fi
-}
-
-# Reverts NODE_TYPE env variable to previous value
-revertNodeType()
-{
-    if [ -n "$NEW_NODE_TYPE" ]; then
-        NODE_TYPE=$NEW_NODE_TYPE
-        NEW_NODE_TYPE=
-    fi
-}
-
-# Returns logs folder for the node (Cassandra, Ignite, Tests)
-getLocalLogsFolder()
-{
-    setNodeType $1
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "/opt/cassandra/logs"
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        echo "/opt/ignite/work/log"
-    elif [ "$NODE_TYPE" == "test" ]; then
-        echo "/opt/ignite-cassandra-tests/logs"
-    elif [ "$NODE_TYPE" == "ganglia" ]; then
-        echo ""
-    fi
-
-    revertNodeType
-}
-
-# Returns S3 URL to discover this node
-getDiscoveryUrl()
-{
-    setNodeType $1
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "$S3_CASSANDRA_NODES_DISCOVERY"
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        echo "$S3_IGNITE_NODES_DISCOVERY"
-    elif [ "$NODE_TYPE" == "test" ]; then
-        echo "$S3_TESTS_NODES_DISCOVERY"
-    elif [ "$NODE_TYPE" == "ganglia" ]; then
-        echo "$S3_GANGLIA_MASTER_DISCOVERY"
-    fi
-
-    revertNodeType
-}
-
-# Returns S3 URL used as a join lock, used by nodes to join cluster sequentially
-getJoinLockUrl()
-{
-    setNodeType $1
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "$S3_CASSANDRA_NODES_JOIN_LOCK"
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        echo "$S3_IGNITE_NODES_JOIN_LOCK"
-    fi
-
-    revertNodeType
-}
-
-# Returns S3 URL used to select the first node for the cluster. The first node is responsible
-# for doing all routine work (cleaning S3 logs/test results from the previous execution) on cluster startup
-getFirstNodeLockUrl()
-{
-    setNodeType $1
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "$S3_CASSANDRA_FIRST_NODE_LOCK"
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        echo "$S3_IGNITE_FIRST_NODE_LOCK"
-    elif [ "$NODE_TYPE" == "test" ]; then
-        echo "$S3_TESTS_FIRST_NODE_LOCK"
-    fi
-
-    revertNodeType
-}
-
-# Returns S3 success URL for the node - a folder created in S3 when the node successfully starts, containing the node logs
-getSucessUrl()
-{
-    setNodeType $1
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "$S3_CASSANDRA_BOOTSTRAP_SUCCESS"
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        echo "$S3_IGNITE_BOOTSTRAP_SUCCESS"
-    elif [ "$NODE_TYPE" == "test" ]; then
-        echo "$S3_TESTS_SUCCESS"
-    elif [ "$NODE_TYPE" == "ganglia" ]; then
-        echo "$S3_GANGLIA_BOOTSTRAP_SUCCESS"
-    fi
-
-    revertNodeType
-}
-
-# Returns S3 failure URL for the node - a folder created in S3 when the node fails to start, containing the node logs
-getFailureUrl()
-{
-    setNodeType $1
-
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        echo "$S3_CASSANDRA_BOOTSTRAP_FAILURE"
-    elif [ "$NODE_TYPE" == "ignite" ]; then
-        echo "$S3_IGNITE_BOOTSTRAP_FAILURE"
-    elif [ "$NODE_TYPE" == "test" ]; then
-        echo "$S3_TESTS_FAILURE"
-    elif [ "$NODE_TYPE" == "ganglia" ]; then
-        echo "$S3_GANGLIA_BOOTSTRAP_FAILURE"
-    fi
-
-    revertNodeType
-}
-
-# Terminates script execution, unregisters the node and removes all the locks (join lock, first node lock) created by it
-terminate()
-{
-    SUCCESS_URL=$(getSucessUrl)
-    FAILURE_URL=$(getFailureUrl)
-
-    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
-        SUCCESS_URL=${SUCCESS_URL}/
-    fi
-
-    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
-        FAILURE_URL=${FAILURE_URL}/
-    fi
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    msg=$HOST_NAME
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Failed to start $NODE_TYPE node"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-        reportFolder=${FAILURE_URL}${HOST_NAME}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] $NODE_TYPE node successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        reportFolder=${SUCCESS_URL}${HOST_NAME}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/ignite-cassandra-tests/bootstrap/start_result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to drop report folder: $reportFolder"
-    fi
-
-    localLogs=$(getLocalLogsFolder)
-
-    if [ -d "$localLogs" ]; then
-        aws s3 sync --sse AES256 $localLogs $reportFolder
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to export $NODE_TYPE logs to: $reportFolder"
-        fi
-    fi
-
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/bootstrap/start_result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to export node start result to: $reportFile"
-    fi
-
-    rm -f /opt/ignite-cassandra-tests/bootstrap/start_result /opt/ignite-cassandra-tests/bootstrap/join-lock /opt/ignite-cassandra-tests/bootstrap/first-node-lock
-
-    removeClusterJoinLock
-
-    if [ "$NODE_TYPE" == "test" ]; then
-        aws s3 rm ${S3_TESTS_RUNNING}${HOST_NAME}
-        aws s3 rm ${S3_TESTS_WAITING}${HOST_NAME}
-        aws s3 rm ${S3_TESTS_IDLE}${HOST_NAME}
-        aws s3 rm ${S3_TESTS_PREPARING}${HOST_NAME}
-        unregisterNode
-    fi
-
-    if [ -n "$1" ]; then
-        removeFirstNodeLock
-        unregisterNode
-        exit 1
-    fi
-
-    exit 0
-}
-
-# Registers the node by creating a file named after the node hostname inside a specific S3 folder
-registerNode()
-{
-    DISCOVERY_URL=$(getDiscoveryUrl)
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    echo "[INFO] Registering $NODE_TYPE node: ${DISCOVERY_URL}${HOST_NAME}"
-
-    aws s3 cp --sse AES256 /etc/hosts ${DISCOVERY_URL}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to register $NODE_TYPE node info in: ${DISCOVERY_URL}${HOST_NAME}"
-    fi
-
-    echo "[INFO] $NODE_TYPE node successfully registered"
-}
-
-# Unregisters the node by removing the file named after the node hostname from the specific S3 folder
-unregisterNode()
-{
-    DISCOVERY_URL=$(getDiscoveryUrl)
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    echo "[INFO] Removing $NODE_TYPE node registration from: ${DISCOVERY_URL}${HOST_NAME}"
-
-    exists=$(aws s3 ls ${DISCOVERY_URL}${HOST_NAME})
-
-    if [ -n "$exists" ]; then
-        aws s3 rm ${DISCOVERY_URL}${HOST_NAME}
-
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to remove $NODE_TYPE node registration"
-        else
-            echo "[INFO] $NODE_TYPE node registration removed"
-        fi
-    else
-        echo "[INFO] Node registration actually haven't been previously created"
-    fi
-}
-
-# Cleans up all nodes metadata for a particular cluster (Cassandra, Ignite, Tests). Performed only by the node that
-# acquired the first node lock.
-cleanupMetadata()
-{
-    DISCOVERY_URL=$(getDiscoveryUrl)
-    JOIN_LOCK_URL=$(getJoinLockUrl)
-    SUCCESS_URL=$(getSucessUrl)
-    FAILURE_URL=$(getFailureUrl)
-
-    echo "[INFO] Running metadata cleanup"
-
-    aws s3 rm $JOIN_LOCK_URL
-    aws s3 rm --recursive $DISCOVERY_URL
-    aws s3 rm --recursive $SUCCESS_URL
-    aws s3 rm --recursive $FAILURE_URL
-
-    echo "[INFO] Metadata cleanup completed"
-}
-
-# Tries to get the first node lock for the node. Only one (first) node can hold this lock, and it is responsible for
-# the cleanup process when starting the cluster
-tryToGetFirstNodeLock()
-{
-    if [ "$FIRST_NODE_LOCK" == "true" ]; then
-        return 0
-    fi
-
-    FIRST_NODE_LOCK_URL=$(getFirstNodeLockUrl)
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    echo "[INFO] Trying to get first node lock: $FIRST_NODE_LOCK_URL"
-
-    checkFirstNodeLockExist $FIRST_NODE_LOCK_URL
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    echo "$HOST_NAME" > /opt/ignite-cassandra-tests/bootstrap/first-node-lock
-
-    createFirstNodeLock $FIRST_NODE_LOCK_URL
-
-    sleep 5s
-
-    rm -Rf /opt/ignite-cassandra-tests/bootstrap/first-node-lock
-
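-    # Read the lock back from S3 and compare hostnames to detect a concurrent writer that may have overwritten it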
-    aws s3 cp $FIRST_NODE_LOCK_URL /opt/ignite-cassandra-tests/bootstrap/first-node-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created first node lock"
-        return 1
-    fi
-
-    first_host=$(cat /opt/ignite-cassandra-tests/bootstrap/first-node-lock)
-
-    rm -f /opt/ignite-cassandra-tests/bootstrap/first-node-lock
-
-    if [ "$first_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created first node lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got first node lock"
-
-    FIRST_NODE_LOCK="true"
-
-    return 0
-}
-
-# Checks if first node lock already exists in S3
-checkFirstNodeLockExist()
-{
-    echo "[INFO] Checking for the first node lock: $1"
-
-    lockExists=$(aws s3 ls $1)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] First node lock already exists"
-        return 1
-    fi
-
-    echo "[INFO] First node lock doesn't exist"
-
-    return 0
-}
-
-# Creates first node lock in S3
-createFirstNodeLock()
-{
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/bootstrap/first-node-lock $1
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create first node lock: $1"
-    fi
-
-    echo "[INFO] Created first node lock: $1"
-}
-
-# Removes first node lock from S3
-removeFirstNodeLock()
-{
-    if [ "$FIRST_NODE_LOCK" != "true" ]; then
-        return 0
-    fi
-
-    FIRST_NODE_LOCK_URL=$(getFirstNodeLockUrl)
-
-    echo "[INFO] Removing first node lock: $FIRST_NODE_LOCK_URL"
-
-    aws s3 rm $FIRST_NODE_LOCK_URL
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to remove first node lock: $FIRST_NODE_LOCK_URL"
-    fi
-
-    echo "[INFO] Removed first node lock: $FIRST_NODE_LOCK_URL"
-
-    FIRST_NODE_LOCK="false"
-}
-
-# Tries to get cluster join lock. Nodes use this lock to join a cluster sequentially.
-tryToGetClusterJoinLock()
-{
-    if [ "$JOIN_LOCK" == "true" ]; then
-        return 0
-    fi
-
-    JOIN_LOCK_URL=$(getJoinLockUrl)
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    echo "[INFO] Trying to get cluster join lock"
-
-    checkClusterJoinLockExist $JOIN_LOCK_URL
-    if [ $? -ne 0 ]; then
-        return 1
-    fi
-
-    echo "$HOST_NAME" > /opt/ignite-cassandra-tests/bootstrap/join-lock
-
-    createClusterJoinLock $JOIN_LOCK_URL
-
-    sleep 5s
-
-    rm -Rf /opt/ignite-cassandra-tests/bootstrap/join-lock
-
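-    # Read the lock back and verify this node still owns it (another node may have overwritten it concurrently)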
-    aws s3 cp $JOIN_LOCK_URL /opt/ignite-cassandra-tests/bootstrap/join-lock
-    if [ $? -ne 0 ]; then
-        echo "[WARN] Failed to check just created cluster join lock"
-        return 1
-    fi
-
-    join_host=$(cat /opt/ignite-cassandra-tests/bootstrap/join-lock)
-
-    if [ "$join_host" != "$HOST_NAME" ]; then
-        echo "[INFO] Node $first_host has discarded previously created cluster join lock"
-        return 1
-    fi
-
-    echo "[INFO] Congratulations, got cluster join lock"
-
-    JOIN_LOCK="true"
-
-    return 0
-}
-
-# Checks if join lock already exists in S3
-checkClusterJoinLockExist()
-{
-    echo "[INFO] Checking for the cluster join lock: $1"
-
-    lockExists=$(aws s3 ls $1)
-    if [ -n "$lockExists" ]; then
-        echo "[INFO] Cluster join lock already exists"
-        return 1
-    fi
-
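-    # For Cassandra also check nodetool status: UL/UM/UJ mean a node is still leaving/moving/joining the ring,
-    # so this node should keep waiting even though no lock exists in S3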
-    if [ "$NODE_TYPE" == "cassandra" ]; then
-        status=$(/opt/cassandra/bin/nodetool -h $CASSANDRA_SEED status)
-        leaving=$(echo $status | grep UL)
-        moving=$(echo $status | grep UM)
-        joining=$(echo $status | grep UJ)
-
-        if [ -n "$leaving" ] || [ -n "$moving" ] || [ -n "$joining" ]; then
-            echo "[INFO] Cluster join lock doesn't exist in S3, but some node still trying to join Cassandra cluster"
-            return 1
-        fi
-    fi
-
-    echo "[INFO] Cluster join lock doesn't exist"
-}
-
-# Creates join lock in S3
-createClusterJoinLock()
-{
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/bootstrap/join-lock $1
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create cluster join lock: $1"
-    fi
-
-    echo "[INFO] Created cluster join lock: $1"
-}
-
-# Removes join lock
-removeClusterJoinLock()
-{
-    if [ "$JOIN_LOCK" != "true" ]; then
-        return 0
-    fi
-
-    JOIN_LOCK_URL=$(getJoinLockUrl)
-
-    echo "[INFO] Removing cluster join lock: $JOIN_LOCK_URL"
-
-    aws s3 rm $JOIN_LOCK_URL
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to remove cluster join lock: $JOIN_LOCK_URL"
-    fi
-
-    JOIN_LOCK="false"
-
-    echo "[INFO] Removed cluster join lock: $JOIN_LOCK_URL"
-}
-
-# Waits for the node to join the cluster, periodically trying to acquire the cluster join lock and returning only when
-# the node has successfully acquired the lock. This mechanism is used by nodes to join the cluster sequentially (a Cassandra limitation).
-waitToJoinCluster()
-{
-    echo "[INFO] Waiting to join $NODE_TYPE cluster"
-
-    while true; do
-        tryToGetClusterJoinLock
-
-        if [ $? -ne 0 ]; then
-            echo "[INFO] Another node is trying to join cluster. Waiting for extra 30sec."
-            sleep 30s
-        else
-            echo "[INFO]-------------------------------------------------------------"
-            echo "[INFO] Congratulations, got lock to join $NODE_TYPE cluster"
-            echo "[INFO]-------------------------------------------------------------"
-            break
-        fi
-    done
-}
-
-# Waits for the cluster to register at least one node in S3, so that all other nodes can use the already registered
-# nodes as seeds to join the cluster
-setupClusterSeeds()
-{
-    if [ "$1" != "cassandra" ] && [ "$1" != "ignite" ] && [ "$1" != "test" ]; then
-        terminate "Incorrect cluster type specified '$1' to setup seeds"
-    fi
-
-    DISCOVERY_URL=$(getDiscoveryUrl $1)
-
-    echo "[INFO] Setting up $1 seeds"
-
-    echo "[INFO] Looking for $1 seeds in: $DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        seeds=$(aws s3 ls $DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g")
-        if [ -n "$seeds" ]; then
-            seeds=($seeds)
-            length=${#seeds[@]}
-
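-            # Pick up to three seeds: the first three registered nodes, or three chosen at random when more than three are available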
-            if [ $length -lt 4 ]; then
-                seed1=${seeds[0]}
-                seed2=${seeds[1]}
-                seed3=${seeds[2]}
-            else
-                pos1=$(($RANDOM%$length))
-                pos2=$(($RANDOM%$length))
-                pos3=$(($RANDOM%$length))
-                seed1=${seeds[${pos1}]}
-                seed2=${seeds[${pos2}]}
-                seed3=${seeds[${pos3}]}
-            fi
-
-            CLUSTER_SEEDS=$seed1
-
-            if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then
-                CLUSTER_SEEDS="$CLUSTER_SEEDS $seed2"
-            fi
-
-            if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then
-                CLUSTER_SEEDS="$CLUSTER_SEEDS $seed3"
-            fi
-
-            echo "[INFO] Using $1 seeds: $CLUSTER_SEEDS"
-
-            return 0
-        fi
-
-        currentTime=$(date +%s)
-        duration=$(( $currentTime-$startTime ))
-        duration=$(( $duration/60 ))
-
-        if [ "$2" == "true" ]; then
-            if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
-                terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first $1 node is still not up and running"
-            fi
-        fi
-
-        echo "[INFO] Waiting for the first $1 node to start and publish its seed, time passed ${duration}min"
-
-        sleep 30s
-    done
-}
-
-# Waits until the first cluster node is registered in S3
-waitFirstClusterNodeRegistered()
-{
-    DISCOVERY_URL=$(getDiscoveryUrl)
-
-    echo "[INFO] Waiting for the first $NODE_TYPE node to register in: $DISCOVERY_URL"
-
-    startTime=$(date +%s)
-
-    while true; do
-        exists=$(aws s3 ls $DISCOVERY_URL)
-        if [ -n "$exists" ]; then
-            break
-        fi
-
-        if [ "$1" == "true" ]; then
-            currentTime=$(date +%s)
-            duration=$(( $currentTime-$startTime ))
-            duration=$(( $duration/60 ))
-
-            if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
-                terminate "${SERVICE_STARTUP_TIME}min timeout expired, but first $type node is still not up and running"
-            fi
-        fi
-
-        echo "[INFO] Waiting extra 30sec"
-
-        sleep 30s
-    done
-
-    echo "[INFO] First $type node registered"
-}
-
-# Waits until all cluster nodes have successfully bootstrapped. In case of the Tests cluster, also waits until all nodes
-# switch to the waiting state
-waitAllClusterNodesReady()
-{
-    if [ "$1" == "cassandra" ]; then
-        NODES_COUNT=$CASSANDRA_NODES_COUNT
-    elif [ "$1" == "ignite" ]; then
-        NODES_COUNT=$IGNITE_NODES_COUNT
-    elif [ "$1" == "test" ]; then
-        NODES_COUNT=$TEST_NODES_COUNT
-    else
-        terminate "Incorrect cluster type specified '$1' to wait for all nodes up and running"
-    fi
-
-    SUCCESS_URL=$(getSucessUrl $1)
-
-    if [ $NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[INFO] Waiting for all $NODES_COUNT $1 nodes ready"
-
-    while true; do
-        if [ "$1" == "test" ]; then
-            count1=$(aws s3 ls $S3_TESTS_WAITING | wc -l)
-            count2=$(aws s3 ls $S3_TESTS_RUNNING | wc -l)
-            count=$(( $count1+$count2 ))
-        else
-            count=$(aws s3 ls $SUCCESS_URL | wc -l)
-        fi
-
-        if [ $count -ge $NODES_COUNT ]; then
-            break
-        fi
-
-        echo "[INFO] Waiting extra 30sec"
-
-        sleep 30s
-    done
-
-    sleep 30s
-
-    echo "[INFO] Congratulation, all $NODES_COUNT $1 nodes are ready"
-}
-
-# Waits until all Tests cluster nodes have completed their tests execution
-waitAllTestNodesCompletedTests()
-{
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    echo "[INFO] Waiting for all $TEST_NODES_COUNT test nodes to complete their tests"
-
-    while true; do
-
-        count=$(aws s3 ls $S3_TESTS_RUNNING | grep -v $HOST_NAME | wc -l)
-
-        if [ $count -eq 0 ]; then
-            break
-        fi
-
-        echo "[INFO] Waiting extra 30sec"
-
-        sleep 30s
-    done
-
-    echo "[INFO] Congratulation, all $TEST_NODES_COUNT test nodes have completed their tests"
-}
-
-# Installs all required Ganglia packages
-installGangliaPackages()
-{
-    if [ "$1" == "master" ]; then
-        echo "[INFO] Installing Ganglia master required packages"
-    else
-        echo "[INFO] Installing Ganglia agent required packages"
-    fi
-
-    isAmazonLinux=$(cat "/etc/issue" | grep "Amazon Linux")
-
-    if [ -z "$isAmazonLinux" ]; then
-        setenforce 0
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to turn off SELinux"
-        fi
-
-        downloadPackage "$EPEL_DOWNLOAD_URL" "/opt/epel.rpm" "EPEL"
-
-        rpm -Uvh /opt/epel.rpm
-        if [ $? -ne 0 ]; then
-            terminate "Failed to setup EPEL repository"
-        fi
-
-        rm -f /opt/epel.rpm
-    fi
-
-    yum -y install apr-devel apr-util check-devel cairo-devel pango-devel pango \
-    libxml2-devel glib2-devel dbus-devel freetype-devel freetype \
-    libpng-devel libart_lgpl-devel fontconfig-devel gcc-c++ expat-devel \
-    python-devel libXrender-devel perl-devel perl-CPAN gettext git sysstat \
-    automake autoconf ltmain.sh pkg-config gperf libtool pcre-devel libconfuse-devel
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install all Ganglia required packages"
-    fi
-
-    if [ "$1" == "master" ]; then
-        yum -y install httpd php php-devel php-pear
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to install all Ganglia required packages"
-        fi
-
-        if [ -z "$isAmazonLinux" ]; then
-            yum -y install liberation-sans-fonts
-
-            if [ $? -ne 0 ]; then
-                terminate "Failed to install liberation-sans-fonts package"
-            fi
-        fi
-    fi
-
-    if [ -z "$isAmazonLinux" ]; then
-        downloadPackage "$GPERF_DOWNLOAD_URL" "/opt/gperf.tar.gz" "gperf"
-
-        tar -xvzf /opt/gperf.tar.gz -C /opt
-        if [ $? -ne 0 ]; then
-            terminate "Failed to untar gperf tarball"
-        fi
-
-        rm -Rf /opt/gperf.tar.gz
-
-        unzipDir=$(ls /opt | grep "gperf")
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to update creation date to current for all files inside: /opt/$unzipDir"
-        fi
-
-        pushd /opt/$unzipDir
-
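-        # Patch configure to bypass autoconf's "newly created file is older than distributed files" clock sanity check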
-        cat ./configure | sed -r "s/test \"\\\$2\" = conftest.file/test 1 = 1/g" > ./configure1
-        rm ./configure
-        mv ./configure1 ./configure
-        chmod a+x ./configure
-
-        ./configure
-        if [ $? -ne 0 ]; then
-            terminate "Failed to configure gperf"
-        fi
-
-        make
-        if [ $? -ne 0 ]; then
-            terminate "Failed to make gperf"
-        fi
-
-        make install
-        if [ $? -ne 0 ]; then
-            terminate "Failed to install gperf"
-        fi
-
-        echo "[INFO] gperf tool successfully installed"
-
-        popd
-    fi
-
-    echo "[INFO] Installing rrdtool"
-
-    downloadPackage "$RRD_DOWNLOAD_URL" "/opt/rrdtool.tar.gz" "rrdtool"
-
-    tar -xvzf /opt/rrdtool.tar.gz -C /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to untar rrdtool tarball"
-    fi
-
-    rm -Rf /opt/rrdtool.tar.gz
-
-    unzipDir=$(ls /opt | grep "rrdtool")
-    if [ "$unzipDir" != "rrdtool" ]; then
-        mv /opt/$unzipDir /opt/rrdtool
-    fi
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to update creation date to current for all files inside: /opt/rrdtool"
-    fi
-
-    export PKG_CONFIG_PATH=/usr/lib/pkgconfig/
-
-    pushd /opt/rrdtool
-
-    cat ./configure | sed -r "s/test \"\\\$2\" = conftest.file/test 1 = 1/g" > ./configure1
-    rm ./configure
-    mv ./configure1 ./configure
-    chmod a+x ./configure
-
-    ./configure --prefix=/usr/local/rrdtool
-    if [ $? -ne 0 ]; then
-        terminate "Failed to configure rrdtool"
-    fi
-
-    make
-    if [ $? -ne 0 ]; then
-        terminate "Failed to make rrdtool"
-    fi
-
-    make install
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install rrdtool"
-    fi
-
-    ln -s /usr/local/rrdtool/bin/rrdtool /usr/bin/rrdtool
-    mkdir -p /var/lib/ganglia/rrds
-
-    chown -R nobody:nobody /usr/local/rrdtool /var/lib/ganglia/rrds /usr/bin/rrdtool
-
-    rm -Rf /opt/rrdtool
-
-    popd
-
-    echo "[INFO] rrdtool successfully installed"
-
-    echo "[INFO] Installig ganglia-core"
-
-    gitClone $GANGLIA_CORE_DOWNLOAD_URL /opt/monitor-core
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to update creation date to current for all files inside: /opt/monitor-core"
-    fi
-
-    pushd /opt/monitor-core
-
-    git checkout efe9b5e5712ea74c04e3b15a06eb21900e18db40
-
-    ./bootstrap
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to prepare ganglia-core for compilation"
-    fi
-
-    cat ./configure | sed -r "s/test \"\\\$2\" = conftest.file/test 1 = 1/g" > ./configure1
-    rm ./configure
-    mv ./configure1 ./configure
-    chmod a+x ./configure
-
-    ./configure --with-gmetad --with-librrd=/usr/local/rrdtool
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to configure ganglia-core"
-    fi
-
-    make
-    if [ $? -ne 0 ]; then
-        terminate "Failed to make ganglia-core"
-    fi
-
-    make install
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install ganglia-core"
-    fi
-
-    rm -Rf /opt/monitor-core
-
-    popd
-
-    echo "[INFO] ganglia-core successfully installed"
-
-    if [ "$1" != "master" ]; then
-        return 0
-    fi
-
-    echo "[INFO] Installing ganglia-web"
-
-    gitClone $GANGLIA_WEB_DOWNLOAD_URL /opt/web
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to update creation date to current for all files inside: /opt/web"
-    fi
-
-    cat /opt/web/Makefile | sed -r "s/GDESTDIR = \/usr\/share\/ganglia-webfrontend/GDESTDIR = \/opt\/ganglia-web/g" > /opt/web/Makefile1
-    cat /opt/web/Makefile1 | sed -r "s/GCONFDIR = \/etc\/ganglia-web/GCONFDIR = \/opt\/ganglia-web/g" > /opt/web/Makefile2
-    cat /opt/web/Makefile2 | sed -r "s/GWEB_STATEDIR = \/var\/lib\/ganglia-web/GWEB_STATEDIR = \/opt\/ganglia-web/g" > /opt/web/Makefile3
-    cat /opt/web/Makefile3 | sed -r "s/APACHE_USER = www-data/APACHE_USER = apache/g" > /opt/web/Makefile4
-
-    rm -f /opt/web/Makefile
-    cp /opt/web/Makefile4 /opt/web/Makefile
-    rm -f /opt/web/Makefile1 /opt/web/Makefile2 /opt/web/Makefile3 /opt/web/Makefile4
-
-    pushd /opt/web
-
-    git checkout f2b19c7cacfc8c51921be801b92f8ed0bd4901ae
-
-    make
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to make ganglia-web"
-    fi
-
-    make install
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install ganglia-web"
-    fi
-
-    rm -Rf /opt/web
-
-    popd
-
-    echo "" >> /etc/httpd/conf/httpd.conf
-    echo "Alias /ganglia /opt/ganglia-web" >> /etc/httpd/conf/httpd.conf
-    echo "<Directory \"/opt/ganglia-web\">" >> /etc/httpd/conf/httpd.conf
-    echo "       AllowOverride All" >> /etc/httpd/conf/httpd.conf
-    echo "       Order allow,deny" >> /etc/httpd/conf/httpd.conf
-
-    if [ -z "$isAmazonLinux" ]; then
-        echo "       Require all granted" >> /etc/httpd/conf/httpd.conf
-    fi
-
-    echo "       Allow from all" >> /etc/httpd/conf/httpd.conf
-    echo "       Deny from none" >> /etc/httpd/conf/httpd.conf
-    echo "</Directory>" >> /etc/httpd/conf/httpd.conf
-
-    echo "[INFO] ganglia-web successfully installed"
-}
-
-# Sets up the ntpd service
-setupNTP()
-{
-    echo "[INFO] Installing ntp package"
-
-    yum -y install ntp
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install ntp package"
-    fi
-
-    echo "[INFO] Starting ntpd service"
-
-    service ntpd restart
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to restart ntpd service"
-    fi
-}
-
-# Installs and runs Ganglia agent ('gmond' daemon)
-bootstrapGangliaAgent()
-{
-    echo "[INFO]-----------------------------------------------------------------"
-    echo "[INFO] Bootstrapping Ganglia agent"
-    echo "[INFO]-----------------------------------------------------------------"
-
-    installGangliaPackages
-
-    echo "[INFO] Running ganglia agent daemon to discover Ganglia master"
-
-    /opt/ignite-cassandra-tests/bootstrap/aws/ganglia/agent-start.sh $1 $2 > /opt/ganglia-agent.log &
-
-    echo "[INFO] Ganglia daemon job id: $!"
-}
-
-# Partitions, formats to ext4 and mounts all unpartitioned drives.
-# As a result, the MOUNT_POINTS env array provides all newly created mount points.
-mountUnpartitionedDrives()
-{
-    MOUNT_POINTS=
-
-    echo "[INFO] Mounting unpartitioned drives"
-
-    lsblk -V &> /dev/null
-
-    if [ $? -ne 0 ]; then
-        echo "[WARN] lsblk utility doesn't exist"
-        echo "[INFO] Installing util-linux-ng package"
-
-        yum -y install util-linux-ng
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to install util-linux-ng package"
-        fi
-    fi
-
-    parted -v &> /dev/null
-
-    if [ $? -ne 0 ]; then
-        echo "[WARN] parted utility doesn't exist"
-        echo "[INFO] Installing parted package"
-
-        yum -y install parted
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to install parted package"
-        fi
-    fi
-
-    drives=$(lsblk -io KNAME,TYPE | grep disk | sed -r "s/disk//g" | xargs)
-
-    echo "[INFO] Found HDDs: $drives"
-
-    unpartDrives=
-    partDrives=$(lsblk -io KNAME,TYPE | grep part | sed -r "s/[0-9]*//g" | sed -r "s/part//g" | xargs)
-
-    drives=($drives)
-    count=${#drives[@]}
-    iter=1
-
-    for (( i=0; i<=$(( $count -1 )); i++ ))
-    do
-        drive=${drives[$i]}
-
-        if [ -z "$drive" ]; then
-            continue
-        fi
-
-        isPartitioned=$(echo $partDrives | grep "$drive")
-
-        if [ -n "$isPartitioned" ]; then
-            continue
-        fi
-
-        echo "[INFO] Creating partition for the drive: $drive"
-
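-        # Create a GPT label and a single partition spanning the whole drive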
-        parted -s -a opt /dev/$drive mklabel gpt mkpart primary 0% 100%
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create partition for the drive: $drive"
-        fi
-
-        partition=$(lsblk -io KNAME,TYPE | grep part | grep $drive | sed -r "s/part//g" | xargs)
-
-        echo "[INFO] Successfully created partition $partition for the drive: $drive"
-
-        echo "[INFO] Formatting partition /dev/$partition to ext4"
-
-        mkfs.ext4 -F -q /dev/$partition
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to format partition: /dev/$partition"
-        fi
-
-        echo "[INFO] Partition /dev/$partition was successfully formatted to ext4"
-
-        echo "[INFO] Mounting partition /dev/$partition to /storage$iter"
-
-        mkdir -p /storage$iter
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create mount point directory: /storage$iter"
-        fi
-
-        echo "/dev/$partition               /storage$iter               ext4    defaults        1 1" >> /etc/fstab
-
-        mount /storage$iter
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to mount /storage$iter mount point for partition /dev/$partition"
-        fi
-
-        echo "[INFO] Partition /dev/$partition was successfully mounted to /storage$iter"
-
-        if [ -n "$MOUNT_POINTS" ]; then
-            MOUNT_POINTS="$MOUNT_POINTS "
-        fi
-
-        MOUNT_POINTS="${MOUNT_POINTS}/storage${iter}"
-
-        iter=$(($iter+1))
-    done
-
-    if [ -z "$MOUNT_POINTS" ]; then
-        echo "[INFO] All drives already have partitions created"
-    fi
-
-    MOUNT_POINTS=($MOUNT_POINTS)
-}
-
-# Creates storage directories for Cassandra: data files, commit log, saved caches.
-# As a result CASSANDRA_DATA_DIR, CASSANDRA_COMMITLOG_DIR, CASSANDRA_CACHES_DIR will point to appropriate directories.
-createCassandraStorageLayout()
-{
-    CASSANDRA_DATA_DIR=
-    CASSANDRA_COMMITLOG_DIR=
-    CASSANDRA_CACHES_DIR=
-
-    mountUnpartitionedDrives
-
-    echo "[INFO] Creating Cassandra storage layout"
-
-    count=${#MOUNT_POINTS[@]}
-
-    for (( i=0; i<=$(( $count -1 )); i++ ))
-    do
-        mountPoint=${MOUNT_POINTS[$i]}
-
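-        # First mount point -> data dir, second -> commit log, third -> saved caches, any extra ones become additional data dirs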
-        if [ -z "$CASSANDRA_DATA_DIR" ]; then
-            CASSANDRA_DATA_DIR=$mountPoint
-        elif [ -z "$CASSANDRA_COMMITLOG_DIR" ]; then
-            CASSANDRA_COMMITLOG_DIR=$mountPoint
-        elif [ -z "$CASSANDRA_CACHES_DIR" ]; then
-            CASSANDRA_CACHES_DIR=$mountPoint
-        else
-            CASSANDRA_DATA_DIR="$CASSANDRA_DATA_DIR $mountPoint"
-        fi
-    done
-
-    if [ -z "$CASSANDRA_DATA_DIR" ]; then
-        CASSANDRA_DATA_DIR="/storage/cassandra/data"
-    else
-        CASSANDRA_DATA_DIR="$CASSANDRA_DATA_DIR/cassandra_data"
-    fi
-
-    if [ -z "$CASSANDRA_COMMITLOG_DIR" ]; then
-        CASSANDRA_COMMITLOG_DIR="/storage/cassandra/commitlog"
-    else
-        CASSANDRA_COMMITLOG_DIR="$CASSANDRA_COMMITLOG_DIR/cassandra_commitlog"
-    fi
-
-    if [ -z "$CASSANDRA_CACHES_DIR" ]; then
-        CASSANDRA_CACHES_DIR="/storage/cassandra/saved_caches"
-    else
-        CASSANDRA_CACHES_DIR="$CASSANDRA_CACHES_DIR/cassandra_caches"
-    fi
-
-    echo "[INFO] Cassandra data dir: $CASSANDRA_DATA_DIR"
-    echo "[INFO] Cassandra commit log dir: $CASSANDRA_COMMITLOG_DIR"
-    echo "[INFO] Cassandra saved caches dir: $CASSANDRA_CACHES_DIR"
-
-    dirs=("$CASSANDRA_DATA_DIR $CASSANDRA_COMMITLOG_DIR $CASSANDRA_CACHES_DIR")
-
-    count=${#dirs[@]}
-
-    for (( i=0; i<=$(( $count -1 )); i++ ))
-    do
-        directory=${dirs[$i]}
-
-        mkdir -p $directory
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create directory: $directory"
-        fi
-
-        chown -R cassandra:cassandra $directory
-
-        if [ $? -ne 0 ]; then
-            terminate "Failed to assign cassandra:cassandra as an owner of directory $directory"
-        fi
-    done
-
-    DATA_DIR_SPEC="\n"
-
-    dirs=($CASSANDRA_DATA_DIR)
-
-    count=${#dirs[@]}
-
-    for (( i=0; i<=$(( $count -1 )); i++ ))
-    do
-        dataDir=${dirs[$i]}
-        DATA_DIR_SPEC="${DATA_DIR_SPEC}     - ${dataDir}\n"
-    done
-
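-    # Escape forward slashes in the paths so they can later be used as sed replacement text
-    # (e.g. when substituting these values into cassandra.yaml)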
-    CASSANDRA_DATA_DIR=$(echo $DATA_DIR_SPEC | sed -r "s/\//\\\\\//g")
-    CASSANDRA_COMMITLOG_DIR=$(echo $CASSANDRA_COMMITLOG_DIR | sed -r "s/\//\\\\\//g")
-    CASSANDRA_CACHES_DIR=$(echo $CASSANDRA_CACHES_DIR | sed -r "s/\//\\\\\//g")
-}
-
-# Attaches environment configuration settings
-. $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/env.sh
-
-# Validates environment settings
-validate
-
-# Validates node type of EC2 instance
-if [ "$1" != "cassandra" ] && [ "$1" != "ignite" ] && [ "$1" != "test" ] && [ "$1" != "ganglia" ]; then
-    echo "[ERROR] Unsupported node type specified: $1"
-    exit 1
-fi
-
-# Sets node type of EC2 instance
-export NODE_TYPE=$1
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/env.sh b/modules/cassandra/store/src/test/bootstrap/aws/env.sh
deleted file mode 100644
index 031c5c3a1628a..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/env.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# This file specifies environment specific settings to bootstrap required infrastructure for:
-# -----------------------------------------------------------------------------------------------
-#
-#   1) Cassandra cluster
-#   2) Ignite cluster
-#   3) Tests cluster
-#   4) Ganglia agents to be installed on each cluster machine
-#   5) Ganglia master to collect metrics from agents and show graphs on the Ganglia Web dashboard
-#
-# -----------------------------------------------------------------------------------------------
-
-# EC2 tagging related settings
-export EC2_OWNER_TAG=ignite@apache.org
-export EC2_PROJECT_TAG=ignite
-export EC2_CASSANDRA_TAG=CASSANDRA
-export EC2_IGNITE_TAG=IGNITE
-export EC2_TEST_TAG=TEST
-export EC2_GANGLIA_TAG=GANGLIA
-
-# Tests summary settings
-export CASSANDRA_NODES_COUNT=3
-export IGNITE_NODES_COUNT=3
-export TEST_NODES_COUNT=2
-export TESTS_TYPE="ignite"
-
-# Time (in minutes) to wait for a Cassandra/Ignite node to start up and register itself in S3
-export SERVICE_STARTUP_TIME=10
-
-# Number of attempts to start Cassandra/Ignite daemon
-export SERVICE_START_ATTEMPTS=3
-
-# Root S3 folder
-export S3_ROOT=s3://<bucket>/<folder>
-
-# S3 folder for downloads. You should put the ignite load tests jar archive here
-# (you can also place other required artifacts here, such as Cassandra, Ignite and so on)
-export S3_DOWNLOADS=$S3_ROOT/test
-
-# S3 root system folder where all infrastructure info is stored
-export S3_SYSTEM=$S3_ROOT/test1
-
-# S3 system folders to store cluster specific info
-export S3_CASSANDRA_SYSTEM=$S3_SYSTEM/cassandra
-export S3_IGNITE_SYSTEM=$S3_SYSTEM/ignite
-export S3_TESTS_SYSTEM=$S3_SYSTEM/tests
-export S3_GANGLIA_SYSTEM=$S3_SYSTEM/ganglia
-
-# Logs related settings
-export S3_LOGS_TRIGGER=$S3_SYSTEM/logs-trigger
-export S3_LOGS_ROOT=$S3_SYSTEM/logs
-export S3_CASSANDRA_LOGS=$S3_LOGS_ROOT/cassandra
-export S3_IGNITE_LOGS=$S3_LOGS_ROOT/ignite
-export S3_TESTS_LOGS=$S3_LOGS_ROOT/tests
-export S3_GANGLIA_LOGS=$S3_LOGS_ROOT/ganglia
-
-# Cassandra related settings
-export CASSANDRA_DOWNLOAD_URL=http://archive.apache.org/dist/cassandra/3.5/apache-cassandra-3.5-bin.tar.gz
-export S3_CASSANDRA_BOOTSTRAP_SUCCESS=$S3_CASSANDRA_SYSTEM/success
-export S3_CASSANDRA_BOOTSTRAP_FAILURE=$S3_CASSANDRA_SYSTEM/failure
-export S3_CASSANDRA_NODES_DISCOVERY=$S3_CASSANDRA_SYSTEM/discovery
-export S3_CASSANDRA_FIRST_NODE_LOCK=$S3_CASSANDRA_SYSTEM/first-node-lock
-export S3_CASSANDRA_NODES_JOIN_LOCK=$S3_CASSANDRA_SYSTEM/join-lock
-
-# Ignite related settings
-export IGNITE_DOWNLOAD_URL=$S3_DOWNLOADS/apache-ignite-fabric-1.8.0-SNAPSHOT-bin.zip
-export S3_IGNITE_BOOTSTRAP_SUCCESS=$S3_IGNITE_SYSTEM/success
-export S3_IGNITE_BOOTSTRAP_FAILURE=$S3_IGNITE_SYSTEM/failure
-export S3_IGNITE_NODES_DISCOVERY=$S3_IGNITE_SYSTEM/discovery
-export S3_IGNITE_FIRST_NODE_LOCK=$S3_IGNITE_SYSTEM/first-node-lock
-export S3_IGNITE_NODES_JOIN_LOCK=$S3_IGNITE_SYSTEM/i-join-lock
-
-# Tests related settings
-export TESTS_PACKAGE_DONLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.8.0-SNAPSHOT.zip
-export S3_TESTS_TRIGGER=$S3_SYSTEM/tests-trigger
-export S3_TESTS_NODES_DISCOVERY=$S3_TESTS_SYSTEM/discovery
-export S3_TESTS_SUCCESS=$S3_TESTS_SYSTEM/success
-export S3_TESTS_FAILURE=$S3_TESTS_SYSTEM/failure
-export S3_TESTS_IDLE=$S3_TESTS_SYSTEM/idle
-export S3_TESTS_PREPARING=$S3_TESTS_SYSTEM/preparing
-export S3_TESTS_WAITING=$S3_TESTS_SYSTEM/waiting
-export S3_TESTS_RUNNING=$S3_TESTS_SYSTEM/running
-export S3_TESTS_FIRST_NODE_LOCK=$S3_TESTS_SYSTEM/first-node-lock
-export S3_TESTS_SUMMARY=$S3_SYSTEM/t-summary.zip
-
-# Ganglia related settings
-export GANGLIA_CORE_DOWNLOAD_URL=https://github.com/ganglia/monitor-core.git
-export GANGLIA_WEB_DOWNLOAD_URL=https://github.com/ganglia/ganglia-web.git
-export RRD_DOWNLOAD_URL=http://oss.oetiker.ch/rrdtool/pub/rrdtool-1.3.1.tar.gz
-export GPERF_DOWNLOAD_URL=http://ftp.gnu.org/gnu/gperf/gperf-3.0.3.tar.gz
-export EPEL_DOWNLOAD_URL=https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-export S3_GANGLIA_BOOTSTRAP_SUCCESS=$S3_GANGLIA_SYSTEM/success
-export S3_GANGLIA_BOOTSTRAP_FAILURE=$S3_GANGLIA_SYSTEM/failure
-export S3_GANGLIA_MASTER_DISCOVERY=$S3_GANGLIA_SYSTEM/discovery
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh b/modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh
deleted file mode 100644
index 8e49c1844bd9c..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/ganglia/agent-start.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Script to start Ganglia agent on EC2 node (used by agent-bootstrap.sh)
-# -----------------------------------------------------------------------------------------------
-
-. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ganglia"
-
-echo "[INFO] Running Ganglia agent discovery daemon for '$1' cluster using $2 port"
-
-# Wait for the Ganglia master node to be up and running
-waitFirstClusterNodeRegistered
-
-DISCOVERY_URL=$(getDiscoveryUrl)
-
-masterNode=$(aws s3 ls $DISCOVERY_URL | head -1)
-masterNode=($masterNode)
-masterNode=${masterNode[3]}
-masterNode=$(echo $masterNode | xargs)
-
-if [ $? -ne 0 ] || [ -z "$masterNode" ]; then
-    echo "[ERROR] Failed to get Ganglia master node from: $DISCOVERY_URL"
-    exit 1
-fi
-
-echo "[INFO] Got Ganglia master node: $masterNode"
-
-echo "[INFO] Creating gmond config file"
-
-/usr/local/sbin/gmond --default_config > /opt/gmond-default.conf
-
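-# Rewrite the default gmond config: make the agent send-only (deaf = yes), set the cluster name,
-# and replace multicast discovery with a unicast send channel to the Ganglia master on the given port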
-cat /opt/gmond-default.conf | sed -r "s/deaf = no/deaf = yes/g" | \
-sed -r "s/name = \"unspecified\"/name = \"$1\"/g" | \
-sed -r "s/#bind_hostname/bind_hostname/g" | \
-sed "0,/mcast_join = 239.2.11.71/s/mcast_join = 239.2.11.71/host = $masterNode/g" | \
-sed -r "s/mcast_join = 239.2.11.71//g" | sed -r "s/bind = 239.2.11.71//g" | \
-sed -r "s/port = 8649/port = $2/g" | sed -r "s/retry_bind = true//g" > /opt/gmond.conf
-
-echo "[INFO] Running gmond daemon to report to gmetad on $masterNode"
-
-/usr/local/sbin/gmond --conf=/opt/gmond.conf -p /opt/gmond.pid
-
-sleep 2s
-
-if [ ! -f "/opt/gmond.pid" ]; then
-    echo "[ERROR] Failed to start gmond daemon, pid file doesn't exist"
-    exit 1
-fi
-
-pid=$(cat /opt/gmond.pid)
-
-echo "[INFO] gmond daemon started, pid=$pid"
-
-exists=$(ps $pid | grep gmond)
-
-if [ -z "$exists" ]; then
-    echo "[ERROR] gmond daemon abnormally terminated"
-    exit 1
-fi
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh
deleted file mode 100644
index 15fa044550287..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/ganglia/ganglia-bootstrap.sh
+++ /dev/null
@@ -1,417 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Bootstrap script to spin up Ganglia master
-# -----------------------------------------------------------------------------------------------
-
-# URL to download AWS CLI tools
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-# URL to download JDK
-JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
-
-# URL to download the Ignite-Cassandra tests package - you should package it and upload it to this location beforehand
-TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
-
-# Terminates script execution and upload logs to S3
-terminate()
-{
-    SUCCESS_URL=$S3_GANGLIA_BOOTSTRAP_SUCCESS
-    FAILURE_URL=$S3_GANGLIA_BOOTSTRAP_FAILURE
-
-    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
-        SUCCESS_URL=${SUCCESS_URL}/
-    fi
-
-    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
-        FAILURE_URL=${FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Ganglia master node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-
-        if [ -z "$FAILURE_URL" ]; then
-            exit 1
-        fi
-
-        reportFolder=${FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Ganglia master node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-
-        if [ -z "$SUCCESS_URL" ]; then
-            exit 0
-        fi
-
-        reportFolder=${SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-# Downloads specified package
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    for i in 1 2 3 4 5 6 7 8 9 10;
-    do
-        if [[ "$1" == s3* ]]; then
-            aws s3 cp $1 $2
-            code=$?
-        else
-            curl "$1" -o "$2"
-            code=$?
-        fi
-
-        if [ $code -eq 0 ]; then
-            echo "[INFO] $3 package successfully downloaded from $1 into $2"
-            return 0
-        fi
-
-        echo "[WARN] Failed to download $3 package from $i attempt, sleeping extra 5sec"
-        sleep 5s
-    done
-
-    terminate "All 10 attempts to download $3 package from $1 are failed"
-}
-
-# Downloads and sets up the JDK
-setupJava()
-{
-    rm -Rf /opt/java /opt/jdk.tar.gz
-
-    echo "[INFO] Downloading 'jdk'"
-    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
-    if [ $? -ne 0 ]; then
-        terminate "Failed to download 'jdk'"
-    fi
-
-    echo "[INFO] Untaring 'jdk'"
-    tar -xvzf /opt/jdk.tar.gz -C /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to untar 'jdk'"
-    fi
-
-    rm -Rf /opt/jdk.tar.gz
-
-    unzipDir=$(ls /opt | grep "jdk")
-    if [ "$unzipDir" != "java" ]; then
-        mv /opt/$unzipDir /opt/java
-    fi
-}
-
-# Downloads and sets up the AWS CLI
-setupAWSCLI()
-{
-    echo "[INFO] Installing 'awscli'"
-    pip install --upgrade awscli
-    if [ $? -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -Rf /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-}
-
-# Sets up all the prerequisites (packages, settings, etc.)
-setupPreRequisites()
-{
-    echo "[INFO] Installing 'wget' package"
-    yum -y install wget
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'wget' package"
-    fi
-
-    echo "[INFO] Installing 'net-tools' package"
-    yum -y install net-tools
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'net-tools' package"
-    fi
-
-    echo "[INFO] Installing 'python' package"
-    yum -y install python
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'python' package"
-    fi
-
-    echo "[INFO] Installing 'unzip' package"
-    yum -y install unzip
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'unzip' package"
-    fi
-
-    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-    echo "[INFO] Installing 'pip'"
-    python /opt/get-pip.py
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'pip'"
-    fi
-}
-
-# Downloads and sets up the tests package
-setupTestsPackage()
-{
-    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
-
-    rm -Rf /opt/ignite-cassandra-tests
-
-    unzip /opt/ignite-cassandra-tests.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip tests package"
-    fi
-
-    rm -f /opt/ignite-cassandra-tests.zip
-
-    unzipDir=$(ls /opt | grep "ignite-cassandra")
-    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
-        mv /opt/$unzipDir /opt/ignite-cassandra-tests
-    fi
-
-    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
-
-    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ganglia"
-
-    setupNTP
-
-    echo "[INFO] Starting logs collector daemon"
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_GANGLIA_LOGS/$HOST_NAME" "/var/log/httpd" > /opt/logs-collector.log &
-
-    echo "[INFO] Logs collector daemon started: $!"
-
-    echo "----------------------------------------------------------------------------------------"
-    printInstanceInfo
-    echo "----------------------------------------------------------------------------------------"
-    tagInstance
-}
-
-# Creates config file for 'gmond' daemon working in receiver mode
-createGmondReceiverConfig()
-{
-    /usr/local/sbin/gmond --default_config > /opt/gmond-default.conf
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create gmond default config in: /opt/gmond-default.txt"
-    fi
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    cat /opt/gmond-default.conf | sed -r "s/mute = no/mute = yes/g" | \
-    sed -r "s/name = \"unspecified\"/name = \"$1\"/g" | \
-    sed -r "s/#bind_hostname/bind_hostname/g" | \
-    sed "0,/mcast_join = 239.2.11.71/s/mcast_join = 239.2.11.71/host = $HOST_NAME/g" | \
-    sed -r "s/mcast_join = 239.2.11.71//g" | sed -r "s/bind = 239.2.11.71//g" | \
-    sed -r "s/port = 8649/port = $2/g" | sed -r "s/retry_bind = true//g" > /opt/gmond-${1}.conf
-
-    chmod a+r /opt/gmond-${1}.conf
-
-    rm -f /opt/gmond-default.conf
-}
-
-# Creates config file for 'gmond' daemon working in sender-receiver mode
-createGmondSenderReceiverConfig()
-{
-    /usr/local/sbin/gmond --default_config > /opt/gmond-default.conf
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create gmond default config in: /opt/gmond-default.txt"
-    fi
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    cat /opt/gmond-default.conf | sed -r "s/name = \"unspecified\"/name = \"$1\"/g" | \
-    sed -r "s/#bind_hostname/bind_hostname/g" | \
-    sed "0,/mcast_join = 239.2.11.71/s/mcast_join = 239.2.11.71/host = $HOST_NAME/g" | \
-    sed -r "s/mcast_join = 239.2.11.71//g" | sed -r "s/bind = 239.2.11.71//g" | \
-    sed -r "s/port = 8649/port = $2/g" | sed -r "s/retry_bind = true//g" > /opt/gmond-${1}.conf
-
-    chmod a+r /opt/gmond-${1}.conf
-
-    rm -f /opt/gmond-default.conf
-}
-
-# Downloads and sets up the Ganglia (and dependency) packages
-setupGangliaPackages()
-{
-    installGangliaPackages "master"
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    echo "data_source \"cassandra\" ${HOST_NAME}:8641" > /opt/gmetad.conf
-    echo "data_source \"ignite\" ${HOST_NAME}:8642" >> /opt/gmetad.conf
-    echo "data_source \"test\" ${HOST_NAME}:8643" >> /opt/gmetad.conf
-    #echo "data_source \"ganglia\" ${HOST_NAME}:8644" >> /opt/gmetad.conf
-    echo "setuid_username \"nobody\"" >> /opt/gmetad.conf
-    echo "case_sensitive_hostnames 0" >> /opt/gmetad.conf
-
-    chmod a+r /opt/gmetad.conf
-
-    createGmondReceiverConfig cassandra 8641
-    createGmondReceiverConfig ignite 8642
-    createGmondReceiverConfig test 8643
-    #createGmondSenderReceiverConfig ganglia 8644
-}
-
-# Starts 'gmond' receiver daemon
-startGmondReceiver()
-{
-    configFile=/opt/gmond-${1}.conf
-    pidFile=/opt/gmond-${1}.pid
-
-    echo "[INFO] Starting gmond receiver daemon for $1 cluster using config file: $configFile"
-
-    rm -f $pidFile
-
-    /usr/local/sbin/gmond --conf=$configFile --pid-file=$pidFile
-
-    sleep 2s
-
-    if [ ! -f "$pidFile" ]; then
-        terminate "Failed to start gmond daemon for $1 cluster, pid file doesn't exist"
-    fi
-
-    pid=$(cat $pidFile)
-
-    echo "[INFO] gmond daemon for $1 cluster started, pid=$pid"
-
-    exists=$(ps $pid | grep gmond)
-
-    if [ -z "$exists" ]; then
-        terminate "gmond daemon for $1 cluster abnormally terminated"
-    fi
-}
-
-# Starts 'gmetad' daemon
-startGmetadCollector()
-{
-    echo "[INFO] Starting gmetad daemon"
-
-    rm -f /opt/gmetad.pid
-
-    /usr/local/sbin/gmetad --conf=/opt/gmetad.conf --pid-file=/opt/gmetad.pid
-
-    sleep 2s
-
-    if [ ! -f "/opt/gmetad.pid" ]; then
-        terminate "Failed to start gmetad daemon, pid file doesn't exist"
-    fi
-
-    pid=$(cat /opt/gmetad.pid)
-
-    echo "[INFO] gmetad daemon started, pid=$pid"
-
-    exists=$(ps $pid | grep gmetad)
-
-    if [ -z "$exists" ]; then
-        terminate "gmetad daemon abnormally terminated"
-    fi
-}
-
-# Starts Apache 'httpd' service
-startHttpdService()
-{
-    echo "[INFO] Starting httpd service"
-
-    service httpd start
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to start httpd service"
-    fi
-
-    sleep 5s
-
-    exists=$(service httpd status | grep running)
-    if [ -z "$exists" ]; then
-        terminate "httpd service process terminated"
-    fi
-
-    echo "[INFO] httpd service successfully started"
-}
-
-###################################################################################################################
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Ganglia master server"
-echo "[INFO]-----------------------------------------------------------------"
-
-setupPreRequisites
-setupJava
-setupAWSCLI
-setupTestsPackage
-setupGangliaPackages
-
-registerNode
-
-startGmondReceiver cassandra
-startGmondReceiver ignite
-startGmondReceiver test
-#startGmondReceiver ganglia
-startGmetadCollector
-startHttpdService
-
-terminate
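To make the gmetad wiring in setupGangliaPackages above concrete: with a hypothetical master host name of ip-10-0-0-12.ec2.internal, the generated /opt/gmetad.conf would contain roughly the following, one data_source per monitored cluster, matching the ports passed to createGmondReceiverConfig:

    data_source "cassandra" ip-10-0-0-12.ec2.internal:8641
    data_source "ignite" ip-10-0-0-12.ec2.internal:8642
    data_source "test" ip-10-0-0-12.ec2.internal:8643
    setuid_username "nobody"
    case_sensitive_hostnames 0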
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
deleted file mode 100644
index 7f97ea1b71956..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-bootstrap.sh
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Bootstrap script to spin up Ignite cluster
-# -----------------------------------------------------------------------------------------------
-
-# URL to download AWS CLI tools
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-# URL to download JDK
-JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
-
-# URL to download the Ignite-Cassandra tests package - you should package it and upload it to this location beforehand
-TESTS_PACKAGE_DONLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
-
-# Terminates script execution and upload logs to S3
-terminate()
-{
-    SUCCESS_URL=$S3_IGNITE_BOOTSTRAP_SUCCESS
-    FAILURE_URL=$S3_IGNITE_BOOTSTRAP_FAILURE
-
-    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
-        SUCCESS_URL=${SUCCESS_URL}/
-    fi
-
-    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
-        FAILURE_URL=${FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Ignite node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-
-        if [ -z "$FAILURE_URL" ]; then
-            exit 1
-        fi
-
-        reportFolder=${FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Ignite node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-
-        if [ -z "$SUCCESS_URL" ]; then
-            exit 0
-        fi
-
-        reportFolder=${SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-# Downloads specified package
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    for i in 1 2 3 4 5 6 7 8 9 10;
-    do
-        if [[ "$1" == s3* ]]; then
-            aws s3 cp $1 $2
-            code=$?
-        else
-            curl "$1" -o "$2"
-            code=$?
-        fi
-
-        if [ $code -eq 0 ]; then
-            echo "[INFO] $3 package successfully downloaded from $1 into $2"
-            return 0
-        fi
-
-        echo "[WARN] Failed to download $3 package from $i attempt, sleeping extra 5sec"
-        sleep 5s
-    done
-
-    terminate "All 10 attempts to download $3 package from $1 are failed"
-}
-
-# Downloads and sets up the JDK
-setupJava()
-{
-    rm -Rf /opt/java /opt/jdk.tar.gz
-
-    echo "[INFO] Downloading 'jdk'"
-    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
-    if [ $? -ne 0 ]; then
-        terminate "Failed to download 'jdk'"
-    fi
-
-    echo "[INFO] Untaring 'jdk'"
-    tar -xvzf /opt/jdk.tar.gz -C /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to untar 'jdk'"
-    fi
-
-    rm -Rf /opt/jdk.tar.gz
-
-    unzipDir=$(ls /opt | grep "jdk")
-    if [ "$unzipDir" != "java" ]; then
-        mv /opt/$unzipDir /opt/java
-    fi
-}
-
-# Downloads and sets up the AWS CLI
-setupAWSCLI()
-{
-    echo "[INFO] Installing 'awscli'"
-    pip install --upgrade awscli
-    if [ $? -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -Rf /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-}
-
-# Sets up all the prerequisites (packages, settings, etc.)
-setupPreRequisites()
-{
-    echo "[INFO] Installing 'wget' package"
-    yum -y install wget
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'wget' package"
-    fi
-
-    echo "[INFO] Installing 'net-tools' package"
-    yum -y install net-tools
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'net-tools' package"
-    fi
-
-    echo "[INFO] Installing 'python' package"
-    yum -y install python
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'python' package"
-    fi
-
-    echo "[INFO] Installing 'unzip' package"
-    yum -y install unzip
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'unzip' package"
-    fi
-
-    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-    echo "[INFO] Installing 'pip'"
-    python /opt/get-pip.py
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'pip'"
-    fi
-}
-
-# Downloads and sets up the tests package
-setupTestsPackage()
-{
-    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
-
-    rm -Rf /opt/ignite-cassandra-tests
-
-    unzip /opt/ignite-cassandra-tests.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip tests package"
-    fi
-
-    rm -f /opt/ignite-cassandra-tests.zip
-
-    unzipDir=$(ls /opt | grep "ignite-cassandra")
-    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
-        mv /opt/$unzipDir /opt/ignite-cassandra-tests
-    fi
-
-    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
-
-    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ignite"
-
-    setupNTP
-
-    echo "[INFO] Starting logs collector daemon"
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_IGNITE_LOGS/$HOST_NAME" "/opt/ignite/work/log" "/opt/ignite/ignite-start.log" > /opt/logs-collector.log &
-
-    echo "[INFO] Logs collector daemon started: $!"
-
-    echo "----------------------------------------------------------------------------------------"
-    printInstanceInfo
-    echo "----------------------------------------------------------------------------------------"
-    tagInstance
-    bootstrapGangliaAgent "ignite" 8642
-}
-
-# Downloads Ignite package
-downloadIgnite()
-{
-    downloadPackage "$IGNITE_DOWNLOAD_URL" "/opt/ignite.zip" "Ignite"
-
-    rm -Rf /opt/ignite
-
-    echo "[INFO] Unzipping Ignite package"
-    unzip /opt/ignite.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip Ignite package"
-    fi
-
-    rm -f /opt/ignite.zip
-
-    unzipDir=$(ls /opt | grep "ignite" | grep "apache")
-    if [ "$unzipDir" != "ignite" ]; then
-        mv /opt/$unzipDir /opt/ignite
-    fi
-}
-
-# Sets up Ignite
-setupIgnite()
-{
-    echo "[INFO] Creating 'ignite' group"
-    exists=$(cat /etc/group | grep ignite)
-    if [ -z "$exists" ]; then
-        groupadd ignite
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create 'ignite' group"
-        fi
-    fi
-
-    echo "[INFO] Creating 'ignite' user"
-    exists=$(cat /etc/passwd | grep ignite)
-    if [ -z "$exists" ]; then
-        useradd -g ignite ignite
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create 'ignite' user"
-        fi
-    fi
-
-    testsJar=$(find /opt/ignite-cassandra-tests -type f -name "*.jar" | grep ignite-cassandra- | grep tests.jar)
-    if [ -n "$testsJar" ]; then
-        echo "[INFO] Coping tests jar $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
-        cp $testsJar /opt/ignite/libs/optional/ignite-cassandra
-        if [ $? -ne 0 ]; then
-            terminate "Failed copy $testsJar into /opt/ignite/libs/optional/ignite-cassandra"
-        fi
-    fi
-
-    rm -f /opt/ignite/config/ignite-cassandra-server-template.xml
-    mv -f /opt/ignite-cassandra-tests/bootstrap/aws/ignite/ignite-cassandra-server-template.xml /opt/ignite/config
-
-    chown -R ignite:ignite /opt/ignite /opt/ignite-cassandra-tests
-
-    echo "export JAVA_HOME=/opt/java" >> $1
-    echo "export IGNITE_HOME=/opt/ignite" >> $1
-    echo "export USER_LIBS=\$IGNITE_HOME/libs/optional/ignite-cassandra/*:\$IGNITE_HOME/libs/optional/ignite-slf4j/*" >> $1
-    echo "export PATH=\$JAVA_HOME/bin:\$IGNITE_HOME/bin:\$PATH" >> $1
-}
-
-###################################################################################################################
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Ignite node"
-echo "[INFO]-----------------------------------------------------------------"
-
-setupPreRequisites
-setupJava
-setupAWSCLI
-setupTestsPackage
-
-downloadIgnite
-setupIgnite "/root/.bash_profile"
-
-cmd="/opt/ignite-cassandra-tests/bootstrap/aws/ignite/ignite-start.sh"
-
-#sudo -u ignite -g ignite sh -c "$cmd | tee /opt/ignite/ignite-start.log"
-
-$cmd | tee /opt/ignite/ignite-start.log
\ No newline at end of file
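For reference, the environment block that setupIgnite above appends to /root/.bash_profile expands to the following lines (taken directly from the echo statements in the script):

    export JAVA_HOME=/opt/java
    export IGNITE_HOME=/opt/ignite
    export USER_LIBS=$IGNITE_HOME/libs/optional/ignite-cassandra/*:$IGNITE_HOME/libs/optional/ignite-slf4j/*
    export PATH=$JAVA_HOME/bin:$IGNITE_HOME/bin:$PATH

These settings let ignite.sh, launched later by ignite-start.sh, locate the JDK and pull the optional Cassandra store libraries onto the node classpath via USER_LIBS.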
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
deleted file mode 100644
index 692cd8b0b5ce0..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-cassandra-server-template.xml
+++ /dev/null
@@ -1,181 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd
-        http://www.springframework.org/schema/util
-        http://www.springframework.org/schema/util/spring-util.xsd">
-
-    <!-- Cassandra connection settings -->
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
-        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
-            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
-        </constructor-arg>
-    </bean>
-
-    <util:list id="contactPoints" value-type="java.lang.String">
-        ${CASSANDRA_SEEDS}
-    </util:list>
-
-    <bean id="cassandraAdminDataSource" class="org.apache.ignite.cache.store.cassandra.datasource.DataSource">
-        <property name="user" value="cassandra"/>
-        <property name="password" value="cassandra"/>
-        <property name="contactPoints" ref="contactPoints"/>
-        <property name="readConsistency" value="ONE"/>
-        <property name="writeConsistency" value="ONE"/>
-        <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
-    </bean>
-
-    <!-- Persistence settings for 'cache1' -->
-    <bean id="cache1_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="primitive_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" column="key"/>
-    <valuePersistence class="java.lang.Long" strategy="PRIMITIVE" column="value"/>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Persistence settings for 'cache2' -->
-    <bean id="cache2_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="blob_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" />
-    <valuePersistence strategy="BLOB"/>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Persistence settings for 'cache3' -->
-    <bean id="cache3_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="pojo_test3" ttl="86400">
-    <keyspaceOptions>
-        REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 3}
-        AND DURABLE_WRITES = true
-    </keyspaceOptions>
-    <tableOptions>
-        comment = 'A most excellent and useful table'
-        AND read_repair_chance = 0.2
-    </tableOptions>
-    <keyPersistence class="org.apache.ignite.tests.pojos.PersonId" strategy="POJO">
-        <partitionKey>
-            <field name="companyCode" column="company" />
-            <field name="departmentCode" column="department" />
-        </partitionKey>
-        <clusterKey>
-            <field name="personNumber" column="number" sort="desc"/>
-        </clusterKey>
-    </keyPersistence>
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person"
-                      strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer">
-        <field name="firstName" column="first_name" />
-        <field name="lastName" column="last_name" />
-        <field name="age" />
-        <field name="married" index="true"/>
-        <field name="height" />
-        <field name="weight" />
-        <field name="birthDate" column="birth_date" />
-        <field name="phones" />
-    </valuePersistence>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="backups" value="0"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="writeBehindEnabled" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache2" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="backups" value="0"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="writeBehindEnabled" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache2_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache3" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache3"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="backups" value="0"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="writeBehindEnabled" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache3_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-            </list>
-        </property>
-
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">
-                        <property name="addresses">
-                            <list>
-                                ${IGNITE_SEEDS}
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-
-    </bean>
-</beans>
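The ${CASSANDRA_SEEDS} and ${IGNITE_SEEDS} placeholders in the template above are not Spring properties: ignite-start.sh (removed further below) replaces them textually with <value>host</value> entries before the node starts. A minimal sketch of that substitution, assuming two made-up seed addresses, is:

    # Hypothetical seed list, built the same way setupCassandraSeeds does it.
    CASSANDRA_SEEDS="<value>10.0.1.10<\/value><value>10.0.1.11<\/value>"

    cat /opt/ignite/config/ignite-cassandra-server-template.xml | \
        sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" \
        > /opt/ignite/config/ignite-cassandra-server.xml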
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh
deleted file mode 100644
index bfe3371917077..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-env.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Environment setup script for Ignite
-# -----------------------------------------------------------------------------------------------
-
-JVM_OPTS="-Xms10g -Xmx10g -server -XX:+AggressiveOpts -XX:MaxMetaspaceSize=256m"
-JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseTLAB -XX:NewSize=128m -XX:MaxNewSize=768m"
-#JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=0 -XX:SurvivorRatio=1024 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=60"
-JVM_OPTS="$JVM_OPTS -Xss16m"
-
-export JVM_OPTS
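The JVM_OPTS exported above take effect because ignite-start.sh (below) sources this file right before launching the node; the stock ignite.sh launcher is expected to honor a pre-set JVM_OPTS environment variable. A hedged usage sketch:

    # Source the environment file, then start the node with the generated config.
    . /opt/ignite-cassandra-tests/bootstrap/aws/ignite/ignite-env.sh
    /opt/ignite/bin/ignite.sh /opt/ignite/config/ignite-cassandra-server.xml &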
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh b/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh
deleted file mode 100644
index f2c15574a3669..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/ignite/ignite-start.sh
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Script to start Ignite daemon (used by ignite-bootstrap.sh)
-# -----------------------------------------------------------------------------------------------
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "ignite"
-
-# Sets up Cassandra seeds so that this Ignite node is able to connect to Cassandra.
-# Looks up information in S3 about Cassandra cluster nodes that are already up and running.
-setupCassandraSeeds()
-{
-    setupClusterSeeds "cassandra" "true"
-
-    CLUSTER_SEEDS=($CLUSTER_SEEDS)
-    count=${#CLUSTER_SEEDS[@]}
-
-    CASSANDRA_SEEDS=
-
-    for (( i=0; i<=$(( $count -1 )); i++ ))
-    do
-        seed=${CLUSTER_SEEDS[$i]}
-        CASSANDRA_SEEDS="${CASSANDRA_SEEDS}<value>$seed<\/value>"
-    done
-
-    cat /opt/ignite/config/ignite-cassandra-server-template.xml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server.xml
-}
-
-# Sets up the Ignite seed nodes which this EC2 Ignite node will use to send its metadata and join the Ignite cluster
-setupIgniteSeeds()
-{
-    if [ "$FIRST_NODE_LOCK" == "true" ]; then
-        echo "[INFO] Setting up Ignite seeds"
-
-        CLUSTER_SEEDS="127.0.0.1:47500..47509"
-
-        echo "[INFO] Using localhost address as a seed for the first Ignite node: $CLUSTER_SEEDS"
-
-        aws s3 rm --recursive ${S3_IGNITE_NODES_DISCOVERY::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
-        fi
-    else
-        setupClusterSeeds "ignite" "true"
-    fi
-
-    CLUSTER_SEEDS=($CLUSTER_SEEDS)
-    count=${#CLUSTER_SEEDS[@]}
-
-    IGNITE_SEEDS=
-
-    for (( i=0; i<=$(( $count -1 )); i++ ))
-    do
-        seed=${CLUSTER_SEEDS[$i]}
-        IGNITE_SEEDS="${IGNITE_SEEDS}<value>$seed<\/value>"
-    done
-
-    cat /opt/ignite/config/ignite-cassandra-server.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite/config/ignite-cassandra-server1.xml
-    mv -f /opt/ignite/config/ignite-cassandra-server1.xml /opt/ignite/config/ignite-cassandra-server.xml
-}
-
-# Checks status of Ignite daemon
-checkIgniteStatus()
-{
-    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
-
-    nodeId=
-    nodeAddrs=
-    nodePorts=
-    topology=
-    metrics=
-
-    logFile=$(ls /opt/ignite/work/log/ | grep "\.log$")
-    if [ -n "$logFile" ]; then
-        logFile=/opt/ignite/work/log/$logFile
-        nodeId=$(cat $logFile | grep "Local node \[ID")
-        nodeAddrs=$(cat $logFile | grep "Local node addresses:")
-        nodePorts=$(cat $logFile | grep "Local ports:")
-        topology=$(cat $logFile | grep "Topology snapshot")
-        metrics=$(cat $logFile | grep "Metrics for local node" | head -n 1)
-    fi
-
-    if [ -n "$nodeId" ] && [ -n "$nodeAddrs" ] && [ -n "$nodePorts" ] && [ -n "$topology" ] && [ -n "$metrics" ] && [ -n "$proc" ]; then
-        sleep 30s
-        return 0
-    fi
-
-    return 1
-}
-
-# Gracefully starts Ignite daemon and waits until it joins Ignite cluster
-startIgnite()
-{
-    echo "[INFO]-------------------------------------------------------------"
-    echo "[INFO] Trying attempt $START_ATTEMPT to start Ignite daemon"
-    echo "[INFO]-------------------------------------------------------------"
-    echo ""
-
-    setupCassandraSeeds
-    setupIgniteSeeds
-
-    waitToJoinCluster
-
-    if [ "$FIRST_NODE_LOCK" == "true" ]; then
-        aws s3 rm --recursive ${S3_IGNITE_NODES_DISCOVERY::-1}
-        if [ $? -ne 0 ]; then
-            terminate "Failed to clean Ignite node discovery URL: $S3_IGNITE_NODES_DISCOVERY"
-        fi
-    fi
-
-    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
-    proc=($proc)
-
-    if [ -n "${proc[1]}" ]; then
-        echo "[INFO] Terminating existing Ignite process ${proc[1]}"
-        kill -9 ${proc[1]}
-    fi
-
-    echo "[INFO] Starting Ignite"
-    rm -Rf /opt/ignite/work/*
-    /opt/ignite/bin/ignite.sh /opt/ignite/config/ignite-cassandra-server.xml &
-
-    echo "[INFO] Ignite job id: $!"
-
-    sleep 1m
-
-    START_ATTEMPT=$(( $START_ATTEMPT+1 ))
-}
-
-#######################################################################################################
-
-START_ATTEMPT=0
-
-# Cleans all the previous metadata about this EC2 node
-unregisterNode
-
-# Tries to get first-node lock
-tryToGetFirstNodeLock
-
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE_LOCK" == "true" ]; then
-    echo "[INFO] Starting first Ignite node"
-else
-    echo "[INFO] Starting Ignite node"
-fi
-
-echo "[INFO]-----------------------------------------------------------------"
-printInstanceInfo
-echo "[INFO]-----------------------------------------------------------------"
-
-if [ "$FIRST_NODE_LOCK" != "true" ]; then
-    waitFirstClusterNodeRegistered "true"
-else
-    cleanupMetadata
-fi
-
-# Applies Ignite environment settings from ignite-env.sh
-envScript=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/ignite-env.sh)
-if [ -f "$envScript" ]; then
-    . $envScript
-fi
-
-# Start Ignite daemon
-startIgnite
-
-startTime=$(date +%s)
-
-# Trying multiple attempts to start Ignite daemon
-while true; do
-    proc=$(ps -ef | grep java | grep "org.apache.ignite.startup.cmdline.CommandLineStartup")
-
-    checkIgniteStatus
-
-    if [ $? -eq 0 ]; then
-        sleep 1m
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Ignite daemon successfully started"
-        echo "[INFO]-----------------------------------------------------"
-        echo $proc
-        echo "[INFO]-----------------------------------------------------"
-
-        # Once the node has joined the cluster we need to remove the cluster-join lock
-        # to allow other EC2 nodes to acquire it and join the cluster sequentially
-        removeClusterJoinLock
-
-        break
-    fi
-
-    currentTime=$(date +%s)
-    duration=$(( $currentTime-$startTime ))
-    duration=$(( $duration/60 ))
-
-    if [ $duration -gt $SERVICE_STARTUP_TIME ]; then
-        if [ "$FIRST_NODE_LOCK" == "true" ]; then
-            # If the first node of the Ignite cluster failed to start the Ignite daemon within SERVICE_STARTUP_TIME min,
-            # we will not try any other attempts and will just terminate with an error. The terminate function itself
-            # will take care of removing all the locks held by this node.
-            terminate "${SERVICE_STARTUP_TIME}min timeout expired, but the Ignite daemon on the first node is still not up and running"
-        else
-            # If the node isn't the first node of the Ignite cluster and it failed to start, we need to
-            # remove the cluster-join lock to allow other EC2 nodes to acquire it
-            removeClusterJoinLock
-
-            # If the node failed all SERVICE_START_ATTEMPTS attempts to start the Ignite daemon, we will not
-            # try anymore and will terminate with an error
-            if [ $START_ATTEMPT -gt $SERVICE_START_ATTEMPTS ]; then
-                terminate "${SERVICE_START_ATTEMPTS} attempts exceed, but Ignite daemon is still not up and running"
-            fi
-
-            # New attempt to start Ignite daemon
-            startIgnite
-        fi
-
-        continue
-    fi
-
-    # Handle the situation when the Ignite daemon process terminated abnormally
-    if [ -z "$proc" ]; then
-        # If this is the first node of the Ignite cluster, just terminate with an error
-        if [ "$FIRST_NODE_LOCK" == "true" ]; then
-            terminate "Failed to start Ignite daemon"
-        fi
-
-        # Remove cluster-join lock to allow other EC2 nodes to acquire it
-        removeClusterJoinLock
-
-        echo "[WARN] Failed to start Ignite daemon. Sleeping for extra 30sec"
-        sleep 30s
-
-        # New attempt to start Ignite daemon
-        startIgnite
-
-        continue
-    fi
-
-    echo "[INFO] Waiting for Ignite daemon to start, time passed ${duration}min"
-    sleep 30s
-done
-
-# Once the Ignite daemon has successfully started, we register the new Ignite node in S3
-registerNode
-
-# Terminating script with zero exit code
-terminate
\ No newline at end of file
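The restart loop above leans on checkIgniteStatus, which greps the node log for well-known startup markers. A compact, hedged sketch of the same readiness probe, assuming the default /opt/ignite/work/log layout used by the script:

    # Minimal readiness probe: report whether the local node has joined the topology.
    logFile=$(ls /opt/ignite/work/log/ 2>/dev/null | grep "\.log$" | head -1)

    if [ -n "$logFile" ] && grep -q "Topology snapshot" "/opt/ignite/work/log/$logFile"; then
        echo "[INFO] Ignite node joined the topology"
    else
        echo "[WARN] Ignite node is not ready yet"
    fi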
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh b/modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh
deleted file mode 100644
index 1634b89cff575..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/logs-collector.sh
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Logs collector daemon
-# -----------------------------------------------------------------------------------------------
-# Script is launched in background by all EC2 nodes of all clusters (Cassandra, Ignite, Tests) and
-# periodically (every 30 seconds) checks if the specific S3 trigger file (specified by $S3_LOGS_TRIGGER_URL)
-# was created or its timestamp was changed. Such an event serves as a trigger for the script
-# to collect EC2 instance logs (from the locations passed as the remaining script arguments) and upload
-# them into the specific S3 folder (specified by $S3_LOGS_FOLDER).
-# -----------------------------------------------------------------------------------------------
-
-uploadLogs()
-{
-    if [ ! -d "$1" ]; then
-        echo "[INFO] Logs directory doesn't exist: $1"
-        return 0
-    fi
-
-    echo "[INFO] Uploading logs from directory: $1"
-
-    dirList=$(ls $1 | head -1)
-
-    if [ -z "$dirList" ]; then
-        echo "[INFO] Directory is empty: $1"
-    fi
-
-    for i in 1 2 3 4 5 6 7 8 9 10;
-    do
-        aws s3 sync --sse AES256 --delete "$1" "$S3_LOGS_FOLDER"
-        code=$?
-
-        if [ $code -eq 0 ]; then
-            echo "[INFO] Successfully uploaded logs from directory: $1"
-            return 0
-        fi
-
-        echo "[WARN] Failed to upload logs from $i attempt, sleeping extra 30sec"
-        sleep 30s
-    done
-
-    echo "[ERROR] All 10 attempts to upload logs are failed for the directory: $1"
-}
-
-createNewLogsSnapshot()
-{
-    rm -f ~/logs-collector.snapshot.new
-
-    for log_src in "$@"
-    do
-        if [ -d "$log_src" ] || [ -f "$log_src" ]; then
-            ls -alR $log_src >> ~/logs-collector.snapshot.new
-
-        fi
-    done
-}
-
-checkLogsChanged()
-{
-    createNewLogsSnapshot $@
-
-    if [ ! -f "~/logs-collector.snapshot" ]; then
-        return 1
-    fi
-
-    diff "~/logs-collector.snapshot" "~/logs-collector.snapshot.new" > /dev/null
-
-    return $?
-}
-
-updateLogsSnapshot()
-{
-    if [ ! -f "~/logs-collector.snapshot.new" ]; then
-        return 0
-    fi
-
-    rm -f "~/logs-collector.snapshot"
-    mv "~/logs-collector.snapshot.new" "~/logs-collector.snapshot"
-}
-
-collectLogs()
-{
-    createNewLogsSnapshot
-
-    rm -Rf ~/logs-collector-logs
-    mkdir -p ~/logs-collector-logs
-
-    for log_src in "$@"
-    do
-        if [ -f "$log_src" ]; then
-            echo "[INFO] Collecting log file: $log_src"
-            cp -f $log_src ~/logs-collector-logs
-        elif [ -d "$log_src" ]; then
-            echo "[INFO] Collecting logs from folder: $log_src"
-            cp -Rf $log_src ~/logs-collector-logs
-        fi
-    done
-
-    uploadLogs ~/logs-collector-logs
-
-    rm -Rf ~/logs-collector-logs
-
-    updateLogsSnapshot
-}
-
-echo "[INFO] Running Logs collector service"
-
-if [ -z "$1" ]; then
-    echo "[ERROR] Logs collection S3 trigger URL doesn't specified"
-    exit 1
-fi
-
-S3_LOGS_TRIGGER_URL=$1
-
-echo "[INFO] Logs collection S3 trigger URL: $S3_LOGS_TRIGGER_URL"
-
-if [ -z "$2" ]; then
-    echo "[ERROR] S3 folder where to upload logs doesn't specified"
-    exit 1
-fi
-
-S3_LOGS_FOLDER=$2
-
-echo "[INFO] S3 logs upload folder: $S3_LOGS_FOLDER"
-
-shift 2
-
-if [ -z "$1" ]; then
-    echo "[WARN] Local logs sources don't specified"
-else
-    echo "[INFO] Local logs sources: $@"
-fi
-
-echo "--------------------------------------------------------------------"
-
-TRIGGER_STATE=
-
-while true; do
-    sleep 30s
-
-    STATE=$(aws s3 ls $S3_LOGS_TRIGGER_URL)
-
-    if [ -z "$STATE" ] || [ "$STATE" == "$TRIGGER_STATE" ]; then
-        checkLogsChanged
-
-        if [ $? -eq 0 ]; then
-            continue
-        fi
-    fi
-
-    TRIGGER_STATE=$STATE
-
-    collectLogs $@ /var/log/cloud-init.log /var/log/cloud-init-output.log
-
-    echo "--------------------------------------------------------------------"
-done
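The collector above is launched in the background by each bootstrap script; its calling convention is the trigger URL first, the destination S3 folder second, then any number of local log files or directories. A usage sketch mirroring the ignite-bootstrap.sh invocation (the S3 URLs here are placeholders):

    # Placeholders: substitute the real trigger object and per-host logs folder.
    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh \
        "s3://example-bucket/logs-trigger" \
        "s3://example-bucket/logs/ignite/$(hostname -f)" \
        "/opt/ignite/work/log" "/opt/ignite/ignite-start.log" > /opt/logs-collector.log &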
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml b/modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
deleted file mode 100644
index 2989563ddaf1a..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/tests/ignite-cassandra-client-template.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd
-        http://www.springframework.org/schema/util
-        http://www.springframework.org/schema/util/spring-util.xsd">
-
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
-        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
-            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
-        </constructor-arg>
-    </bean>
-
-    <util:list id="contactPoints" value-type="java.lang.String">
-        ${CASSANDRA_SEEDS}
-    </util:list>
-
-    <bean id="cassandraAdminDataSource" class="org.apache.ignite.cache.store.cassandra.datasource.DataSource">
-        <property name="user" value="cassandra"/>
-        <property name="password" value="cassandra"/>
-        <property name="contactPoints" ref="contactPoints"/>
-        <property name="readConsistency" value="ONE"/>
-        <property name="writeConsistency" value="ONE"/>
-        <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
-    </bean>
-
-    <!-- Persistence settings for 'cache1' -->
-    <bean id="cache1_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="primitive_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" column="key"/>
-    <valuePersistence class="java.lang.Long" strategy="PRIMITIVE" column="value"/>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Persistence settings for 'cache2' -->
-    <bean id="cache2_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="blob_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" />
-    <valuePersistence strategy="BLOB"/>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Persistence settings for 'cache3' -->
-    <bean id="cache3_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="pojo_test3" ttl="86400">
-    <keyspaceOptions>
-        REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 3}
-        AND DURABLE_WRITES = true
-    </keyspaceOptions>
-    <tableOptions>
-        comment = 'A most excellent and useful table'
-        AND read_repair_chance = 0.2
-    </tableOptions>
-    <keyPersistence class="org.apache.ignite.tests.pojos.PersonId" strategy="POJO">
-        <partitionKey>
-            <field name="companyCode" column="company" />
-            <field name="departmentCode" column="department" />
-        </partitionKey>
-        <clusterKey>
-            <field name="personNumber" column="number" sort="desc"/>
-        </clusterKey>
-    </keyPersistence>
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person"
-                      strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer">
-        <field name="firstName" column="first_name" />
-        <field name="lastName" column="last_name" />
-        <field name="age" />
-        <field name="married" index="true"/>
-        <field name="height" />
-        <field name="weight" />
-        <field name="birthDate" column="birth_date" />
-        <field name="phones" />
-    </valuePersistence>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <!-- Turn on client mode -->
-        <property name="clientMode" value="true"/>
-
-        <property name="metricsLogFrequency" value="0"/>
-
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="backups" value="0"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="writeBehindEnabled" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache2" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="backups" value="0"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="writeBehindEnabled" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache2_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache3" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache3"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="backups" value="0"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="writeBehindEnabled" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache3_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-            </list>
-        </property>
-
-        <!-- Configuring remote ignite cluster connections -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">
-                        <property name="addresses">
-                            <list>
-                                ${IGNITE_SEEDS}
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh
deleted file mode 100644
index 8e6faff699b01..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-bootstrap.sh
+++ /dev/null
@@ -1,317 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Bootstrap script to spin up Tests cluster
-# -----------------------------------------------------------------------------------------------
-
-# URL to download AWS CLI tools
-AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
-
-# URL to download JDK
-JDK_DOWNLOAD_URL=http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz
-
-# URL to download the Ignite-Cassandra tests package - you should have packaged and uploaded it to this location beforehand
-TESTS_PACKAGE_DOWNLOAD_URL=s3://<bucket>/<folder>/ignite-cassandra-tests-<version>.zip
-
-# Terminates script execution and uploads logs to S3
-terminate()
-{
-    SUCCESS_URL=$S3_TESTS_SUCCESS
-    FAILURE_URL=$S3_TESTS_FAILURE
-
-    if [ -n "$SUCCESS_URL" ] && [[ "$SUCCESS_URL" != */ ]]; then
-        SUCCESS_URL=${SUCCESS_URL}/
-    fi
-
-    if [ -n "$FAILURE_URL" ] && [[ "$FAILURE_URL" != */ ]]; then
-        FAILURE_URL=${FAILURE_URL}/
-    fi
-
-    host_name=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    msg=$host_name
-
-    if [ -n "$1" ]; then
-        echo "[ERROR] $1"
-        echo "[ERROR]-----------------------------------------------------"
-        echo "[ERROR] Test node bootstrap failed"
-        echo "[ERROR]-----------------------------------------------------"
-        msg=$1
-
-        if [ -z "$FAILURE_URL" ]; then
-            exit 1
-        fi
-
-        reportFolder=${FAILURE_URL}${host_name}
-        reportFile=$reportFolder/__error__
-    else
-        echo "[INFO]-----------------------------------------------------"
-        echo "[INFO] Test node bootstrap successfully completed"
-        echo "[INFO]-----------------------------------------------------"
-
-        if [ -z "$SUCCESS_URL" ]; then
-            exit 0
-        fi
-
-        reportFolder=${SUCCESS_URL}${host_name}
-        reportFile=$reportFolder/__success__
-    fi
-
-    echo $msg > /opt/bootstrap-result
-
-    aws s3 rm --recursive $reportFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to drop report folder: $reportFolder"
-    fi
-
-    aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to report bootstrap result to: $reportFile"
-    fi
-
-    rm -f /opt/bootstrap-result
-
-    if [ -n "$1" ]; then
-        exit 1
-    fi
-
-    exit 0
-}
-
-# Downloads specified package
-downloadPackage()
-{
-    echo "[INFO] Downloading $3 package from $1 into $2"
-
-    for i in 0 1 2 3 4 5 6 7 8 9;
-    do
-        if [[ "$1" == s3* ]]; then
-            aws s3 cp $1 $2
-            code=$?
-        else
-            curl "$1" -o "$2"
-            code=$?
-        fi
-
-        if [ $code -eq 0 ]; then
-            echo "[INFO] $3 package successfully downloaded from $1 into $2"
-            return 0
-        fi
-
-        echo "[WARN] Failed to download $3 package from $i attempt, sleeping extra 5sec"
-        sleep 5s
-    done
-
-    terminate "All 10 attempts to download $3 package from $1 are failed"
-}
-
-# Downloads and sets up the JDK
-setupJava()
-{
-    rm -Rf /opt/java /opt/jdk.tar.gz
-
-    echo "[INFO] Downloading 'jdk'"
-    wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$JDK_DOWNLOAD_URL" -O /opt/jdk.tar.gz
-    if [ $? -ne 0 ]; then
-        terminate "Failed to download 'jdk'"
-    fi
-
-    echo "[INFO] Untaring 'jdk'"
-    tar -xvzf /opt/jdk.tar.gz -C /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to untar 'jdk'"
-    fi
-
-    rm -Rf /opt/jdk.tar.gz
-
-    unzipDir=$(ls /opt | grep "jdk")
-    if [ "$unzipDir" != "java" ]; then
-        mv /opt/$unzipDir /opt/java
-    fi
-}
-
-# Downloads and sets up the AWS CLI
-setupAWSCLI()
-{
-    echo "[INFO] Installing 'awscli'"
-    pip install --upgrade awscli
-    if [ $? -eq 0 ]; then
-        return 0
-    fi
-
-    echo "[ERROR] Failed to install 'awscli' using pip"
-    echo "[INFO] Trying to install awscli using zip archive"
-    echo "[INFO] Downloading awscli zip"
-
-    downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli"
-
-    echo "[INFO] Unzipping awscli zip"
-    unzip /opt/awscli-bundle.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip awscli zip"
-    fi
-
-    rm -Rf /opt/awscli-bundle.zip
-
-    echo "[INFO] Installing awscli"
-    /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install awscli"
-    fi
-
-    echo "[INFO] Successfully installed awscli from zip archive"
-}
-
-# Sets up all the prerequisites (packages, settings, etc.)
-setupPreRequisites()
-{
-    echo "[INFO] Installing 'wget' package"
-    yum -y install wget
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'wget' package"
-    fi
-
-    echo "[INFO] Installing 'net-tools' package"
-    yum -y install net-tools
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'net-tools' package"
-    fi
-
-    echo "[INFO] Installing 'python' package"
-    yum -y install python
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'python' package"
-    fi
-
-    echo "[INFO] Installing 'unzip' package"
-    yum -y install unzip
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'unzip' package"
-    fi
-
-    downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py"
-
-    echo "[INFO] Installing 'pip'"
-    python /opt/get-pip.py
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install 'pip'"
-    fi
-}
-
-# Downloads and sets up the tests package
-setupTestsPackage()
-{
-    downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/ignite-cassandra-tests.zip" "Tests"
-
-    rm -Rf /opt/ignite-cassandra-tests
-
-    unzip /opt/ignite-cassandra-tests.zip -d /opt
-    if [ $? -ne 0 ]; then
-        terminate "Failed to unzip tests package"
-    fi
-
-    rm -f /opt/ignite-cassandra-tests.zip
-
-    unzipDir=$(ls /opt | grep "ignite-cassandra")
-    if [ "$unzipDir" != "ignite-cassandra-tests" ]; then
-        mv /opt/$unzipDir /opt/ignite-cassandra-tests
-    fi
-
-    find /opt/ignite-cassandra-tests -type f -name "*.sh" -exec chmod ug+x {} \;
-
-    . /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "test"
-
-    setupNTP
-
-    echo "[INFO] Starting logs collector daemon"
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-    /opt/ignite-cassandra-tests/bootstrap/aws/logs-collector.sh "$S3_LOGS_TRIGGER" "$S3_TESTS_LOGS/$HOST_NAME" "/opt/ignite-cassandra-tests/logs" > /opt/logs-collector.log &
-
-    echo "[INFO] Logs collector daemon started: $!"
-
-    echo "----------------------------------------------------------------------------------------"
-    printInstanceInfo
-    echo "----------------------------------------------------------------------------------------"
-    tagInstance
-    bootstrapGangliaAgent "test" 8643
-
-    ###################################################
-    # Extra configuration specific only for test node #
-    ###################################################
-
-    echo "[INFO] Installing bc package"
-
-    yum -y install bc
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install bc package"
-    fi
-
-    echo "[INFO] Installing zip package"
-
-    yum -y install zip
-
-    if [ $? -ne 0 ]; then
-        terminate "Failed to install zip package"
-    fi
-
-    echo "[INFO] Creating 'ignite' group"
-    exists=$(cat /etc/group | grep ignite)
-    if [ -z "$exists" ]; then
-        groupadd ignite
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create 'ignite' group"
-        fi
-    fi
-
-    echo "[INFO] Creating 'ignite' user"
-    exists=$(cat /etc/passwd | grep ignite)
-    if [ -z "$exists" ]; then
-        useradd -g ignite ignite
-        if [ $? -ne 0 ]; then
-            terminate "Failed to create 'ignite' user"
-        fi
-    fi
-
-    mkdir -p /opt/ignite-cassandra-tests/logs
-    chown -R ignite:ignite /opt/ignite-cassandra-tests
-
-    echo "export JAVA_HOME=/opt/java" >> $1
-    echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> $1
-}
-
-###################################################################################################################
-
-echo "[INFO]-----------------------------------------------------------------"
-echo "[INFO] Bootstrapping Tests node"
-echo "[INFO]-----------------------------------------------------------------"
-
-setupPreRequisites
-setupJava
-setupAWSCLI
-setupTestsPackage "/root/.bash_profile"
-
-cmd="/opt/ignite-cassandra-tests/bootstrap/aws/tests/tests-manager.sh"
-
-#sudo -u ignite -g ignite sh -c "$cmd > /opt/ignite-cassandra-tests/tests-manager" &
-
-$cmd > /opt/ignite-cassandra-tests/logs/tests-manager.log &
-
-terminate
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh
deleted file mode 100644
index c0f5d6b8cba9b..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-manager.sh
+++ /dev/null
@@ -1,458 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Tests manager daemon
-# -----------------------------------------------------------------------------------------------
-# The script is launched in the background by all nodes of the Tests cluster and
-# periodically (every 30 seconds) checks whether a specific S3 trigger file was created or
-# its timestamp was changed. Such an event serves as a trigger for the script to start
-# preparing to run load tests.
-# -----------------------------------------------------------------------------------------------
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "test"
-
-# Switch test node to IDLE state
-switchToIdleState()
-{
-    if [ "$NODE_STATE" != "IDLE" ]; then
-        echo "[INFO] Switching node to IDLE state"
-        dropStateFlag "$S3_TESTS_WAITING" "$S3_TESTS_PREPARING" "$S3_TESTS_RUNNING"
-        createStateFlag "$S3_TESTS_IDLE"
-        NODE_STATE="IDLE"
-        echo "[INFO] Node was switched to IDLE state"
-    fi
-}
-
-# Switch test node to PREPARING state
-switchToPreparingState()
-{
-    if [ "$NODE_STATE" != "PREPARING" ]; then
-        echo "[INFO] Switching node to PREPARING state"
-        dropStateFlag "$S3_TESTS_WAITING" "$S3_TESTS_IDLE" "$S3_TESTS_RUNNING"
-        createStateFlag "$S3_TESTS_PREPARING"
-        NODE_STATE="PREPARING"
-        echo "[INFO] Node was switched to PREPARING state"
-    fi
-}
-
-# Switch test node to WAITING state
-switchToWaitingState()
-{
-    if [ "$NODE_STATE" != "WAITING" ]; then
-        echo "[INFO] Switching node to WAITING state"
-        dropStateFlag "$S3_TESTS_IDLE" "$S3_TESTS_PREPARING" "$S3_TESTS_RUNNING"
-        createStateFlag "$S3_TESTS_WAITING"
-        NODE_STATE="WAITING"
-        echo "[INFO] Node was switched to WAITING state"
-    fi
-}
-
-# Switch test node to RUNNING state
-switchToRunningState()
-{
-    if [ "$NODE_STATE" != "RUNNING" ]; then
-        echo "[INFO] Switching node to RUNNING state"
-        dropStateFlag "$S3_TESTS_IDLE" "$S3_TESTS_PREPARING" "$S3_TESTS_WAITING"
-        createStateFlag "$S3_TESTS_RUNNING"
-        NODE_STATE="RUNNING"
-        echo "[INFO] Node was switched to RUNNING state"
-    fi
-}
-
-# Creates appropriate state flag for the node in S3
-createStateFlag()
-{
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    aws s3 cp --sse AES256 /etc/hosts ${1}${HOST_NAME}
-    if [ $? -ne 0 ]; then
-        terminate "Failed to create state flag: ${1}${HOST_NAME}"
-    fi
-}
-
-# Drops appropriate state flag for the node in S3
-dropStateFlag()
-{
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    for flagUrl in "$@"
-    do
-        exists=$(aws s3 ls ${flagUrl}${HOST_NAME})
-        if [ -n "$exists" ]; then
-            aws s3 rm ${flagUrl}${HOST_NAME}
-            if [ $? -ne 0 ]; then
-                terminate "Failed to drop state flag: ${flagUrl}${HOST_NAME}"
-            fi
-        fi
-    done
-}
-
-# Removes tests summary report from S3
-dropTestsSummary()
-{
-    exists=$(aws s3 ls $S3_TESTS_SUMMARY)
-    if [ -z "$exists" ]; then
-        return 0
-    fi
-
-    aws s3 rm $S3_TESTS_SUMMARY
-    if [ $? -ne 0 ]; then
-        terminate "Failed to drop tests summary info: $S3_TESTS_SUMMARY"
-    fi
-}
-
-# Recreate all the necessary Cassandra artifacts before running Load tests
-recreateCassandraArtifacts()
-{
-    /opt/ignite-cassandra-tests/recreate-cassandra-artifacts.sh
-    if [ $? -ne 0 ]; then
-        terminate "Failed to recreate Cassandra artifacts"
-    fi
-}
-
-# Sets up Cassandra seeds so that this Tests node is able to connect to Cassandra.
-# Looks up information in S3 about Cassandra cluster nodes that are already up and running.
-setupCassandraSeeds()
-{
-    if [ $CASSANDRA_NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    setupClusterSeeds "cassandra"
-
-    CASSANDRA_SEEDS1=$(echo $CLUSTER_SEEDS | sed -r "s/ /,/g")
-    CASSANDRA_SEEDS2=
-
-    CLUSTER_SEEDS=($CLUSTER_SEEDS)
-    count=${#CLUSTER_SEEDS[@]}
-
-    for (( i=0; i<=$(( $count - 1 )); i++ ))
-    do
-        seed=${CLUSTER_SEEDS[$i]}
-        CASSANDRA_SEEDS2="${CASSANDRA_SEEDS2}<value>$seed<\/value>"
-    done
-
-    echo "[INFO] Using Cassandra seeds: $CASSANDRA_SEEDS1"
-
-    echo "contact.points=$CASSANDRA_SEEDS1" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/connection.properties
-
-    cat /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template.xml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS2/g" > /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml
-}
-
-# Sets up Ignite seeds so that this Tests node is able to connect to Ignite.
-# Looks up information in S3 about Ignite cluster nodes that are already up and running.
-setupIgniteSeeds()
-{
-    if [ $IGNITE_NODES_COUNT -eq 0 ]; then
-        return 0
-    fi
-
-    setupClusterSeeds "ignite"
-
-    CLUSTER_SEEDS=($CLUSTER_SEEDS)
-    count=${#CLUSTER_SEEDS[@]}
-
-    IGNITE_SEEDS=
-
-    for (( i=0; i<=$(( $count - 1 )); i++ ))
-    do
-        seed=${CLUSTER_SEEDS[$i]}
-        IGNITE_SEEDS="${IGNITE_SEEDS}<value>$seed<\/value>"
-    done
-
-    echo "[INFO] Using Ignite seeds: $IGNITE_SEEDS"
-
-    cat /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml | sed -r "s/\\\$\{IGNITE_SEEDS\}/$IGNITE_SEEDS/g" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
-    rm -f /opt/ignite-cassandra-tests/bootstrap/aws/tests/ignite-cassandra-client-template1.xml
-}
-
-# Sets up Cassandra credentials to connect to the Cassandra cluster
-setupCassandraCredentials()
-{
-    echo "admin.user=cassandra" > /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-    echo "admin.password=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-    echo "regular.user=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-    echo "regular.password=cassandra" >> /opt/ignite-cassandra-tests/settings/org/apache/ignite/tests/cassandra/credentials.properties
-}
-
-# Triggers first-time tests execution for all nodes in the Tests cluster
-triggerFirstTimeTestsExecution()
-{
-    if [ -z "$TESTS_TYPE" ]; then
-        return 0
-    fi
-
-    tryToGetFirstNodeLock
-    if [ $? -ne 0 ]; then
-        return 0
-    fi
-
-    sleep 30s
-
-    echo "[INFO] Triggering first time tests execution"
-
-    echo "TESTS_TYPE=$TESTS_TYPE" > /opt/ignite-cassandra-tests/tests-trigger
-    echo "#--------------------------------------------------" >> /opt/ignite-cassandra-tests/tests-trigger
-    echo "" >> /opt/ignite-cassandra-tests/tests-trigger
-    cat /opt/ignite-cassandra-tests/settings/tests.properties >> /opt/ignite-cassandra-tests/tests-trigger
-
-    aws s3 cp --sse AES256 /opt/ignite-cassandra-tests/tests-trigger $S3_TESTS_TRIGGER
-    code=$?
-
-    rm -f /opt/ignite-cassandra-tests/tests-trigger
-
-    if [ $code -ne 0 ]; then
-        terminate "Failed to create tests trigger: $S3_TESTS_TRIGGER"
-    fi
-}
-
-# Cleans previously created logs from S3
-cleanPreviousLogs()
-{
-    for logFile in /opt/ignite-cassandra-tests/logs/*
-    do
-        managerLog=$(echo $logFile | grep "tests-manager")
-        if [ -z "$managerLog" ]; then
-            rm -Rf $logFile
-        fi
-    done
-
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    aws s3 rm --recursive ${S3_TESTS_FAILURE}${HOST_NAME}
-    aws s3 rm --recursive ${S3_TESTS_SUCCESS}${HOST_NAME}
-}
-
-# Uploads tests logs to S3
-uploadTestsLogs()
-{
-    HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]')
-
-    if [ -f "/opt/ignite-cassandra-tests/logs/__success__" ]; then
-        logsFolder=${S3_TESTS_SUCCESS}${HOST_NAME}
-    else
-        logsFolder=${S3_TESTS_FAILURE}${HOST_NAME}
-    fi
-
-    aws s3 rm --recursive $logsFolder
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to drop logs folder: $logsFolder"
-    fi
-
-    if [ -d "/opt/ignite-cassandra-tests/logs" ]; then
-        aws s3 sync --sse AES256 /opt/ignite-cassandra-tests/logs $logsFolder
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to export tests logs to: $logsFolder"
-        fi
-    fi
-}
-
-# Runs tests-report.sh to prepare tests summary report
-buildTestsSummaryReport()
-{
-    reportScript=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/tests-report.sh)
-    $reportScript
-
-    if [ -n "$S3_LOGS_TRIGGER" ]; then
-        aws s3 cp --sse AES256 /etc/hosts $S3_LOGS_TRIGGER
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to trigger logs collection"
-        fi
-    fi
-}
-
-# Running load tests
-runLoadTests()
-{
-    cd /opt/ignite-cassandra-tests
-
-    if [ "$TESTS_TYPE" == "ignite" ]; then
-        echo "[INFO] Running Ignite load tests"
-        ./ignite-load-tests.sh &
-    else
-        echo "[INFO] Running Cassandra load tests"
-        ./cassandra-load-tests.sh &
-    fi
-
-    testsJobId=$!
-
-    echo "[INFO] Tests job id: $testsJobId"
-
-    sleep 1m
-
-    LOGS_SNAPSHOT=$(ls -al /opt/ignite-cassandra-tests/logs)
-    LOGS_SNAPSHOT_TIME=$(date +%s)
-
-    TERMINATED=
-
-    # tests monitoring
-    while true; do
-        proc=$(ps -ef | grep java | grep "org.apache.ignite.tests")
-        if [ -z "$proc" ]; then
-            break
-        fi
-
-        NEW_LOGS_SNAPSHOT=$(ls -al /opt/ignite-cassandra-tests/logs)
-        NEW_LOGS_SNAPSHOT_TIME=$(date +%s)
-
-        # if the logs state was updated, it means that the tests are running and not stuck
-        if [ "$LOGS_SNAPSHOT" != "$NEW_LOGS_SNAPSHOT" ]; then
-            LOGS_SNAPSHOT=$NEW_LOGS_SNAPSHOT
-            LOGS_SNAPSHOT_TIME=$NEW_LOGS_SNAPSHOT_TIME
-            continue
-        fi
-
-        duration=$(( $NEW_LOGS_SNAPSHOT_TIME-$LOGS_SNAPSHOT_TIME ))
-        duration=$(( $duration/60 ))
-
-        # if the logs weren't updated for 5 minutes, it means that the load tests are stuck
-        if [ $duration -gt 5 ]; then
-            proc=($proc)
-            kill -9 ${proc[1]}
-            TERMINATED="true"
-            break
-        fi
-
-        echo "[INFO] Waiting extra 30sec for load tests to complete"
-
-        sleep 30s
-    done
-
-    rm -f /opt/ignite-cassandra-tests/logs/tests.properties
-    cp /opt/ignite-cassandra-tests/settings/tests.properties /opt/ignite-cassandra-tests/logs
-
-    if [ "$TERMINATED" == "true" ]; then
-        echo "[ERROR] Load tests stuck, tests process terminated"
-        echo "Load tests stuck, tests process terminated" > /opt/ignite-cassandra-tests/logs/__error__
-        return 0
-    fi
-
-    failed=
-    if [ "$TESTS_TYPE" == "cassandra" ]; then
-        failed=$(cat /opt/ignite-cassandra-tests/cassandra-load-tests.log | grep "load tests execution failed")
-    else
-        failed=$(cat /opt/ignite-cassandra-tests/ignite-load-tests.log | grep "load tests execution failed")
-    fi
-
-    if [ -n "$failed" ]; then
-        echo "[ERROR] Load tests execution failed"
-        echo "Load tests execution failed" > /opt/ignite-cassandra-tests/logs/__error__
-    else
-        echo "[INFO] Load tests execution successfully completed"
-        echo "Load tests execution successfully completed" > /opt/ignite-cassandra-tests/logs/__success__
-    fi
-}
-
-#######################################################################################################
-
-sleep 1m
-
-NODE_STATE=
-TRIGGER_STATE=
-
-printInstanceInfo
-setupCassandraCredentials
-switchToIdleState
-
-triggerFirstTimeTestsExecution
-
-registerNode
-
-while true; do
-    # switching state to IDLE
-    switchToIdleState
-
-    sleep 30s
-
-    NEW_TRIGGER_STATE=$(aws s3 ls $S3_TESTS_TRIGGER | xargs)
-    if [ -z "$NEW_TRIGGER_STATE" ] || [ "$NEW_TRIGGER_STATE" == "$TRIGGER_STATE" ]; then
-        continue
-    fi
-
-    echo "----------------------------------------------------------------------"
-    echo "[INFO] Tests trigger changed"
-    echo "----------------------------------------------------------------------"
-    echo "[INFO] Old trigger: $TRIGGER_STATE"
-    echo "----------------------------------------------------------------------"
-    echo "[INFO] New trigger: $NEW_TRIGGER_STATE"
-    echo "----------------------------------------------------------------------"
-
-    TRIGGER_STATE=$NEW_TRIGGER_STATE
-
-    aws s3 cp $S3_TESTS_TRIGGER /opt/ignite-cassandra-tests/tests-trigger
-    if [ $? -ne 0 ]; then
-        echo "[ERROR] Failed to download tests trigger info from: $S3_TESTS_TRIGGER"
-        continue
-    fi
-
-    TESTS_TYPE=$(cat /opt/ignite-cassandra-tests/tests-trigger | grep TESTS_TYPE | xargs | sed -r "s/TESTS_TYPE=//g")
-    if [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
-        rm -f /opt/ignite-cassandra-tests/tests-trigger
-        echo "[ERROR] Incorrect tests type specified in the trigger info: $S3_TESTS_TRIGGER"
-        continue
-    fi
-
-    rm -f /opt/ignite-cassandra-tests/settings/tests.properties
-    mv -f /opt/ignite-cassandra-tests/tests-trigger /opt/ignite-cassandra-tests/settings/tests.properties
-
-    waitAllTestNodesCompletedTests
-
-    # switching state to PREPARING
-    switchToPreparingState
-
-    waitAllClusterNodesReady "cassandra"
-    waitAllClusterNodesReady "ignite"
-    setupCassandraSeeds
-    setupIgniteSeeds
-
-    cleanPreviousLogs
-
-    tryToGetFirstNodeLock
-    if [ $? -eq 0 ]; then
-        dropTestsSummary
-        recreateCassandraArtifacts
-    fi
-
-    # switching state to WAITING
-    switchToWaitingState
-
-    waitAllClusterNodesReady "test"
-
-    if [ "$FIRST_NODE_LOCK" == "true" ]; then
-        aws s3 rm $S3_TESTS_TRIGGER
-    fi
-
-    # switching state to RUNNING
-    switchToRunningState
-
-    runLoadTests
-    uploadTestsLogs
-
-    tryToGetFirstNodeLock
-    if [ $? -eq 0 ]; then
-        waitAllTestNodesCompletedTests
-        buildTestsSummaryReport
-        removeFirstNodeLock
-    fi
-done
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh b/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh
deleted file mode 100644
index 1576d57376166..0000000000000
--- a/modules/cassandra/store/src/test/bootstrap/aws/tests/tests-report.sh
+++ /dev/null
@@ -1,499 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -----------------------------------------------------------------------------------------------
-# Tests report builder
-# -----------------------------------------------------------------------------------------------
-# The script is used to analyze load test logs collected from all 'Tests' cluster nodes and build
-# a summary report
-# -----------------------------------------------------------------------------------------------
-
-#profile=/home/ignite/.bash_profile
-profile=/root/.bash_profile
-
-. $profile
-. /opt/ignite-cassandra-tests/bootstrap/aws/common.sh "test"
-
-# Building tests summary report
-reportTestsSummary()
-{
-    echo "[INFO] Preparing tests results summary"
-
-    TESTS_SUMMARY_DIR=/opt/ignite-cassandra-tests/tests-summary
-    SUCCEED_NODES_FILE=$TESTS_SUMMARY_DIR/succeed-nodes
-    SUCCEED_NODES_DIR=$TESTS_SUMMARY_DIR/succeed
-    FAILED_NODES_FILE=$TESTS_SUMMARY_DIR/failed-nodes
-    FAILED_NODES_DIR=$TESTS_SUMMARY_DIR/failed
-    REPORT_FILE=$TESTS_SUMMARY_DIR/report.txt
-
-    rm -Rf $TESTS_SUMMARY_DIR
-    mkdir -p $TESTS_SUMMARY_DIR
-    mkdir -p $SUCCEED_NODES_DIR
-    mkdir -p $FAILED_NODES_DIR
-
-    aws s3 ls $S3_TESTS_SUCCESS | sed -r "s/PRE //g" | sed -r "s/ //g" | sed -r "s/\///g" > $SUCCEED_NODES_FILE
-    aws s3 ls $S3_TESTS_FAILURE | sed -r "s/PRE //g" | sed -r "s/ //g" | sed -r "s/\///g" > $FAILED_NODES_FILE
-
-    succeedCount=$(cat $SUCCEED_NODES_FILE | wc -l)
-    failedCount=$(cat $FAILED_NODES_FILE | wc -l)
-    count=$(( $succeedCount+$failedCount ))
-
-    echo "Test type         : $TESTS_TYPE" > $REPORT_FILE
-    echo "Test nodes count  : $count" >> $REPORT_FILE
-    echo "Test nodes succeed: $succeedCount" >> $REPORT_FILE
-    echo "Test nodes failed : $failedCount" >> $REPORT_FILE
-    echo "----------------------------------------------------------------------------------------------" >> $REPORT_FILE
-
-    if [ $succeedCount -gt 0 ]; then
-        echo "Succeed test nodes |" >> $REPORT_FILE
-        echo "-------------------" >> $REPORT_FILE
-        cat $SUCCEED_NODES_FILE >> $REPORT_FILE
-        echo "----------------------------------------------------------------------------------------------" >> $REPORT_FILE
-
-        aws s3 sync --delete $S3_TESTS_SUCCESS $SUCCEED_NODES_DIR
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to get succeed tests details"
-        else
-            reportSucceedTestsStatistics "$REPORT_FILE" "$SUCCEED_NODES_DIR"
-        fi
-    fi
-
-    if [ $failedCount -gt 0 ]; then
-        echo "Failed test nodes |" >> $REPORT_FILE
-        echo "------------------" >> $REPORT_FILE
-        cat $FAILED_NODES_FILE >> $REPORT_FILE
-        echo "----------------------------------------------------------------------------------------------" >> $REPORT_FILE
-
-        aws s3 sync --delete $S3_TESTS_FAILURE $FAILED_NODES_DIR
-        if [ $? -ne 0 ]; then
-            echo "[ERROR] Failed to get failed tests details"
-        else
-            reportFailedTestsDetails "$REPORT_FILE" "$FAILED_NODES_DIR"
-        fi
-    fi
-
-    rm -f $HOME/tests-summary.zip
-
-    pushd $TESTS_SUMMARY_DIR
-
-    zip -r -9 $HOME/tests-summary.zip .
-    code=$?
-
-    rm -Rf $TESTS_SUMMARY_DIR
-
-    popd
-
-    if [ $code -ne 0 ]; then
-        echo "-------------------------------------------------------------------------------------"
-        echo "[ERROR] Failed to create tests summary zip archive $HOME/tests-summary.zip for $TESTS_SUMMARY_DIR"
-        echo "-------------------------------------------------------------------------------------"
-        return 1
-    fi
-
-    aws s3 cp --sse AES256 $HOME/tests-summary.zip $S3_TESTS_SUMMARY
-    if [ $? -ne 0 ]; then
-        echo "-------------------------------------------------------------------------------------"
-        echo "[ERROR] Failed to uploat tests summary archive to: $S3_TESTS_SUMMARY"
-        echo "-------------------------------------------------------------------------------------"
-    else
-        echo "-------------------------------------------------------------------------------------"
-        echo "[INFO] Tests results summary uploaded to: $S3_TESTS_SUMMARY"
-        echo "-------------------------------------------------------------------------------------"
-    fi
-
-    rm -f $HOME/tests-summary.zip
-}
-
-# Creates a report for succeeded test nodes
-reportSucceedTestsStatistics()
-{
-    writeMsg="0"
-    writeErrors="0"
-    writeSpeed="0"
-    blkWriteMsg="0"
-    blkWriteErrors="0"
-    blkWriteSpeed="0"
-    readMsg="0"
-    readErrors="0"
-    readSpeed="0"
-    blkReadMsg="0"
-    blkReadErrors="0"
-    blkReadSpeed="0"
-
-    writeErrNodes=
-    blkWriteErrNodes=
-    readErrNodes=
-    blkReadErrNodes=
-
-    tmpFile=$(mktemp)
-
-    for dir in $2/*
-    do
-        node=$(echo $dir | sed -r "s/^.*\///g")
-        echo "-------------------------------------------------------------------------------------"
-        echo "[INFO] Gathering statistics from $node test node"
-        echo "-------------------------------------------------------------------------------------"
-
-        logFile=$(ls $dir | grep "${TESTS_TYPE}-load-tests.log" | head -1)
-        if [ -z "$logFile" ]; then
-            echo "[WARN] Node $node marked as succeeded, but it doesn't have \"${TESTS_TYPE}-load-tests.log\" tests results summary file"
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Node $node marked as succeeded," >> $tmpFile
-            echo "but it doesn't have \"${TESTS_TYPE}-load-tests.log\" tests results summary file" >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            continue
-        fi
-
-        logFile=$dir/$logFile
-        if [ ! -f "$logFile" ]; then
-            echo "[WARN] Node $node marked as succeeded, but it doesn't have \"${TESTS_TYPE}-load-tests.log\" tests results summary file"
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Node $node marked as succeeded," >> $tmpFile
-            echo "but it doesn't have \"${TESTS_TYPE}-load-tests.log\" tests results summary file" >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            continue
-        fi
-
-        cnt=$(cat $logFile | grep "^WRITE messages" | sed -r "s/WRITE messages: //g" | xargs)
-        if [ -n "$cnt" ]; then
-            writeMsg=$(bc <<< "$writeMsg + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] WRITE messages: $cnt"
-            else
-                echo "[WARN] WRITE messages count is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "WRITE messages count is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect WRITE messages count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect WRITE messages count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^WRITE errors" | sed -r "s/WRITE errors: //g" | sed -r "s/,.*//g" | xargs)
-        if [ -n "$cnt" ]; then
-            echo "[INFO] WRITE errors: $cnt"
-            writeErrors=$(bc <<< "$writeErrors + $cnt")
-            if [ $cnt -ne 0 ]; then
-                if [ -n "$writeErrNodes" ]; then
-                    writeErrNodes="${writeErrNodes}, "
-                fi
-                writeErrNodes="${writeErrNodes}${node}"
-            fi
-        else
-            echo "[WARN] Failed to detect WRITE errors count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect WRITE errors count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^WRITE speed" | sed -r "s/WRITE speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
-        if [ -n "$cnt" ]; then
-            writeSpeed=$(bc <<< "$writeSpeed + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] WRITE speed: $cnt msg/sec"
-            else
-                echo "[WARN] WRITE speed is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "WRITE speed is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect WRITE speed for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect WRITE speed for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^BULK_WRITE messages" | sed -r "s/BULK_WRITE messages: //g" | xargs)
-        if [ -n "$cnt" ]; then
-            blkWriteMsg=$(bc <<< "$blkWriteMsg + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] BULK_WRITE messages: $cnt"
-            else
-                echo "[WARN] BULK_WRITE messages count is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "BULK_WRITE messages count is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect BULK_WRITE messages count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect BULK_WRITE messages count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^BULK_WRITE errors" | sed -r "s/BULK_WRITE errors: //g" | sed -r "s/,.*//g" | xargs)
-        if [ -n "$cnt" ]; then
-            blkWriteErrors=$(bc <<< "$blkWriteErrors + $cnt")
-            echo "[INFO] BULK_WRITE errors: $cnt"
-            if [ $cnt -ne 0 ]; then
-                if [ -n "$blkWriteErrNodes" ]; then
-                    blkWriteErrNodes="${blkWriteErrNodes}, "
-                fi
-                blkWriteErrNodes="${blkWriteErrNodes}${node}"
-            fi
-        else
-            echo "[WARN] Failed to detect BULK_WRITE errors count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect BULK_WRITE errors count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^BULK_WRITE speed" | sed -r "s/BULK_WRITE speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
-        if [ -n "$cnt" ]; then
-            blkWriteSpeed=$(bc <<< "$blkWriteSpeed + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] BULK_WRITE speed: $cnt msg/sec"
-            else
-                echo "[WARN] BULK_WRITE speed is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "BULK_WRITE speed is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect BULK_WRITE speed for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect BULK_WRITE speed for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^READ messages" | sed -r "s/READ messages: //g" | xargs)
-        if [ -n "$cnt" ]; then
-            readMsg=$(bc <<< "$readMsg + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] READ messages: $cnt"
-            else
-                echo "[WARN] READ messages count is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "READ messages count is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect READ messages count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect READ messages count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^READ errors" | sed -r "s/READ errors: //g" | sed -r "s/,.*//g" | xargs)
-        if [ -n "$cnt" ]; then
-            readErrors=$(bc <<< "$readErrors + $cnt")
-            echo "[INFO] READ errors: $cnt"
-            if [ $cnt -ne 0 ]; then
-                if [ -n "$readErrNodes" ]; then
-                    blkWriteErrNodes="${readErrNodes}, "
-                fi
-                readErrNodes="${readErrNodes}${node}"
-            fi
-        else
-            echo "[WARN] Failed to detect READ errors count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect READ errors count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^READ speed" | sed -r "s/READ speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
-        if [ -n "$cnt" ]; then
-            readSpeed=$(bc <<< "$readSpeed + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] READ speed: $cnt msg/sec"
-            else
-                echo "[WARN] READ speed is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "READ speed is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect READ speed for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect READ speed for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^BULK_READ messages" | sed -r "s/BULK_READ messages: //g" | xargs)
-        if [ -n "$cnt" ]; then
-            blkReadMsg=$(bc <<< "$blkReadMsg + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] BULK_READ messages: $cnt"
-            else
-                echo "[WARN] BULK_READ messages count is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "BULK_READ messages count is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect BULK_READ messages count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect BULK_READ messages count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^BULK_READ errors" | sed -r "s/BULK_READ errors: //g" | sed -r "s/,.*//g" | xargs)
-        if [ -n "$cnt" ]; then
-            blkReadErrors=$(bc <<< "$blkReadErrors + $cnt")
-            echo "[INFO] BULK_READ errors: $cnt"
-            if [ $cnt -ne 0 ]; then
-                if [ -n "$blkReadErrNodes" ]; then
-                    blkReadErrNodes="${blkReadErrNodes}, "
-                fi
-                blkReadErrNodes="${blkReadErrNodes}${node}"
-            fi
-        else
-            echo "[WARN] Failed to detect BULK_READ errors count for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect BULK_READ errors count for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-
-        cnt=$(cat $logFile | grep "^BULK_READ speed" | sed -r "s/BULK_READ speed: //g" | sed -r "s/ msg\/sec//g" | xargs)
-        if [ -n "$cnt" ]; then
-            blkReadSpeed=$(bc <<< "$blkReadSpeed + $cnt")
-            if [ $cnt -ne 0 ]; then
-                echo "[INFO] BULK_READ speed: $cnt msg/sec"
-            else
-                echo "[WARN] BULK_READ speed is zero for $node node. This test probably failed."
-                echo "WARNING |" >> $tmpFile
-                echo "--------" >> $tmpFile
-                echo "BULK_READ speed is zero for $node node. This test probably failed." >> $tmpFile
-                echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-            fi
-        else
-            echo "[WARN] Failed to detect BULK_READ speed for $node node. This test probably failed."
-            echo "WARNING |" >> $tmpFile
-            echo "--------" >> $tmpFile
-            echo "Failed to detect BULK_READ speed for $node node. This test probably failed." >> $tmpFile
-            echo "----------------------------------------------------------------------------------------------" >> $tmpFile
-        fi
-    done
-
-    echo "-------------------------------------------------------------------------------------"
-
-    echo "WRITE test metrics |" >> $1
-    echo "-------------------" >> $1
-    echo "Messages: $writeMsg" >> $1
-    echo "Speed   : $writeSpeed msg/sec" >> $1
-    echo "Errors  : $writeErrors" >> $1
-    echo "----------------------------------------------------------------------------------------------" >> $1
-
-    echo "BULK_WRITE test metrics |" >> $1
-    echo "------------------------" >> $1
-    echo "Messages: $blkWriteMsg" >> $1
-    echo "Speed   : $blkWriteSpeed msg/sec" >> $1
-    echo "Errors  : $blkWriteErrors" >> $1
-    echo "----------------------------------------------------------------------------------------------" >> $1
-
-    echo "READ test metrics |" >> $1
-    echo "------------------" >> $1
-    echo "Messages: $readMsg" >> $1
-    echo "Speed   : $readSpeed msg/sec" >> $1
-    echo "Errors  : $readErrors" >> $1
-    echo "----------------------------------------------------------------------------------------------" >> $1
-
-    echo "BULK_READ test metrics |" >> $1
-    echo "-----------------------" >> $1
-    echo "Messages: $blkReadMsg" >> $1
-    echo "Speed   : $blkReadSpeed msg/sec" >> $1
-    echo "Errors  : $blkReadErrors" >> $1
-    echo "----------------------------------------------------------------------------------------------" >> $1
-
-    if [ -n "$writeErrNodes" ]; then
-        echo "Nodes having WRITE errors |" >> $1
-        echo "-------------------------------" >> $1
-        echo "$writeErrNodes" >> $1
-        echo "----------------------------------------------------------------------------------------------" >> $1
-    fi
-
-    if [ -n "$blkWriteErrNodes" ]; then
-        echo "Nodes having BULK_WRITE errors |" >> $1
-        echo "-------------------------------" >> $1
-        echo "$blkWriteErrNodes" >> $1
-        echo "----------------------------------------------------------------------------------------------" >> $1
-    fi
-
-    if [ -n "$readErrNodes" ]; then
-        echo "Nodes having READ errors |" >> $1
-        echo "-------------------------------" >> $1
-        echo "$readErrNodes" >> $1
-        echo "----------------------------------------------------------------------------------------------" >> $1
-    fi
-
-    if [ -n "$blkReadErrNodes" ]; then
-        echo "Nodes having BULK_READ errors |" >> $1
-        echo "-------------------------------" >> $1
-        echo "$blkReadErrNodes" >> $1
-        echo "----------------------------------------------------------------------------------------------" >> $1
-    fi
-
-    cat $tmpFile >> $1
-
-    rm -f $tmpFile
-}
-
-# Creates a report for failed test nodes
-reportFailedTestsDetails()
-{
-    for dir in $2/*
-    do
-        node=$(echo $dir | sed -r "s/^.*\///g")
-        if [ -z "$node" ]; then
-            continue
-        fi
-
-        echo "----------------------------------------------------------------------------------------------" >> $1
-        echo "Error details for node: $node" >> $1
-        echo "----------------------------------------------------------------------------------------------" >> $1
-
-        if [ -f "$dir/__error__" ]; then
-            cat $dir/__error__ >> $1
-        else
-            echo "N/A" >> $1
-        fi
-    done
-}
-
-#######################################################################################################
-
-if [ "$TESTS_TYPE" != "ignite" ] && [ "$TESTS_TYPE" != "cassandra" ]; then
-    terminate "Incorrect tests type specified: $TESTS_TYPE"
-fi
-
-reportTestsSummary
\ No newline at end of file
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraConfigTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraConfigTest.java
deleted file mode 100644
index 48ac18050651f..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraConfigTest.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyPersistenceSettings;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Simple test for DDL generator.
- */
-public class CassandraConfigTest {
-    /**
-     * Checks that the same DDL is generated for similar keys and the same key persistence configuration.
-     */
-    @Test
-    public void testDDLGeneration() {
-        KeyPersistenceSettings keyPersistenceSettingsA = getKeyPersistenceSettings(KeyA.class);
-        KeyPersistenceSettings keyPersistenceSettingsB = getKeyPersistenceSettings(KeyB.class);
-
-        assertEquals(keyPersistenceSettingsB.getPrimaryKeyDDL(),
-            keyPersistenceSettingsA.getPrimaryKeyDDL());
-
-        assertEquals(keyPersistenceSettingsB.getClusteringDDL(),
-            keyPersistenceSettingsA.getClusteringDDL());
-    }
-
-    /**
-     * @return KeyPersistenceSettings.
-     */
-    private KeyPersistenceSettings getKeyPersistenceSettings(Class keyClass) {
-        String cfg = "<persistence keyspace=\"public\">" +
-            " <keyPersistence class=\"" + keyClass.getName() + "\"  strategy=\"POJO\"> \n" +
-            "        <partitionKey>\n" +
-            "            <field name=\"name\" column=\"name\"/>\n" +
-            "            <field name=\"contextId\" column=\"context_id\"/>\n" +
-            "            <field name=\"creationDate\" column=\"creation_date\"/>\n" +
-            "        </partitionKey>\n" +
-            "        <clusterKey>\n" +
-            "            <field name=\"timestamp\" column=\"timestamp\"/>\n" +
-            "        </clusterKey>\n" +
-            "    </keyPersistence>" +
-            " <valuePersistence class=\"java.lang.Object\"  strategy=\"BLOB\">" +
-            " </valuePersistence>" +
-            "</persistence>";
-
-        return new KeyValuePersistenceSettings(cfg).getKeyPersistenceSettings();
-    }
-
-    /**
-     *
-     */
-    public static class BaseKey {
-        /** */
-        @QuerySqlField
-        // Looks like the next annotation is ignored when generating DDL,
-        // but Ignite supports this annotation in parent classes.
-//        @AffinityKeyMapped
-        private Integer contextId;
-
-        /** */
-        public Integer getContextId() {
-            return contextId;
-        }
-
-        /** */
-        public void setContextId(Integer contextId) {
-            this.contextId = contextId;
-        }
-    }
-
-    /**
-     *
-     */
-    public static class KeyA extends BaseKey {
-        /** */
-        @QuerySqlField(index = true)
-        private String timestamp;
-
-        /** */
-        @QuerySqlField(index = true)
-        private String name;
-
-        /** */
-        @QuerySqlField
-        private String creationDate;
-
-        /**
-         * Constructor.
-         */
-        public KeyA() {
-        }
-    }
-
-    /**
-     *
-     */
-    public static class KeyB {
-
-        /** */
-        @QuerySqlField(index = true)
-        private String timestamp;
-
-        /** */
-        @QuerySqlField(index = true)
-        private String name;
-
-        /** */
-        @QuerySqlField
-        private String creationDate;
-
-        /** */
-        @QuerySqlField
-//        @AffinityKeyMapped
-        private Integer contextId;
-
-        /**
-         * Constructor.
-         */
-        public KeyB() {
-        }
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java
deleted file mode 100644
index 48f85c3426e30..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceLoadTest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import org.apache.ignite.tests.load.LoadTestDriver;
-import org.apache.ignite.tests.load.cassandra.BulkReadWorker;
-import org.apache.ignite.tests.load.cassandra.BulkWriteWorker;
-import org.apache.ignite.tests.load.cassandra.ReadWorker;
-import org.apache.ignite.tests.load.cassandra.WriteWorker;
-import org.apache.ignite.tests.utils.CacheStoreHelper;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-/**
- * Load tests for the {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore} implementation of
- * {@link org.apache.ignite.cache.store.CacheStore}, which allows storing Ignite cache data in Cassandra tables.
- */
-public class CassandraDirectPersistenceLoadTest extends LoadTestDriver {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger("CassandraLoadTests");
-
-    /**
-     *
-     * @param args Test arguments.
-     */
-    public static void main(String[] args) {
-        try {
-            LOGGER.info("Cassandra load tests execution started");
-
-            LoadTestDriver driver = new CassandraDirectPersistenceLoadTest();
-
-            /*
-             * Load test scripts could be executed from several machines. The current implementation correctly
-             * handles the situation when a Cassandra keyspace/table was dropped - for example, by the same load test
-             * started a bit later on another machine. Moreover, there is a warm-up period for each load test.
-             * Thus all the delays related to keyspace/table recreation will not affect performance metrics,
-             * but lots of "trash" output will be produced in the logs (related to the correct handling of such
-             * exceptional situations and keyspace/table recreation).
-             *
-             * Thus dropping test keyspaces at the beginning of the tests makes sense only for unit tests,
-             * but not for load tests.
-             */
-
-            //CassandraHelper.dropTestKeyspaces();
-
-            driver.runTest("WRITE", WriteWorker.class, WriteWorker.LOGGER_NAME);
-
-            driver.runTest("BULK_WRITE", BulkWriteWorker.class, BulkWriteWorker.LOGGER_NAME);
-
-            driver.runTest("READ", ReadWorker.class, ReadWorker.LOGGER_NAME);
-
-            driver.runTest("BULK_READ", BulkReadWorker.class, BulkReadWorker.LOGGER_NAME);
-
-            /*
-             * A load test script executed on one machine could complete earlier than the same load test executed from
-             * another machine. The current implementation correctly handles the situation when a Cassandra keyspace/table
-             * was dropped (it simply recreates it). But dropping a keyspace/table during load test execution and the
-             * subsequent recreation of such objects can have a SIGNIFICANT EFFECT on the final performance metrics.
-             *
-             * Thus dropping test keyspaces at the end of the tests makes sense only for unit tests,
-             * but not for load tests.
-             */
-
-            //CassandraHelper.dropTestKeyspaces(); // REVIEW Is this line commented out on purpose?
-
-            LOGGER.info("Cassandra load tests execution completed");
-        }
-        catch (Throwable e) {
-            LOGGER.error("Cassandra load tests execution failed", e);
-            throw new RuntimeException("Cassandra load tests execution failed", e);
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override protected Logger logger() {
-        return LOGGER;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected Object setup(String logName) {
-        return CacheStoreHelper.createCacheStore(
-            TestsHelper.getLoadTestsCacheName(),
-            TestsHelper.getLoadTestsPersistenceSettings(),
-            CassandraHelper.getAdminDataSrc(),
-            LogManager.getLogger(logName));
-    }
-
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
deleted file mode 100644
index 0bc33677bb400..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraDirectPersistenceTest.java
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.tests.pojos.Person;
-import org.apache.ignite.tests.pojos.PersonId;
-import org.apache.ignite.tests.pojos.Product;
-import org.apache.ignite.tests.pojos.ProductOrder;
-import org.apache.ignite.tests.pojos.SimplePerson;
-import org.apache.ignite.tests.pojos.SimplePersonId;
-import org.apache.ignite.tests.utils.CacheStoreHelper;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestCacheSession;
-import org.apache.ignite.tests.utils.TestTransaction;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.ignite.transactions.Transaction;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.springframework.core.io.ClassPathResource;
-
-/**
- * Unit tests for the {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore} implementation of
- * {@link org.apache.ignite.cache.store.CacheStore}, which allows storing Ignite cache data in Cassandra tables.
- */
-public class CassandraDirectPersistenceTest {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger(CassandraDirectPersistenceTest.class.getName());
-
-    /** */
-    @BeforeClass
-    public static void setUpClass() {
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.startEmbeddedCassandra(LOGGER);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-            }
-        }
-
-        LOGGER.info("Testing admin connection to Cassandra");
-        CassandraHelper.testAdminConnection();
-
-        LOGGER.info("Testing regular connection to Cassandra");
-        CassandraHelper.testRegularConnection();
-
-        LOGGER.info("Dropping all artifacts from previous tests execution session");
-        CassandraHelper.dropTestKeyspaces();
-
-        LOGGER.info("Start tests execution");
-    }
-
-    /** */
-    @AfterClass
-    public static void tearDownClass() {
-        try {
-            CassandraHelper.dropTestKeyspaces();
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-
-            if (CassandraHelper.useEmbeddedCassandra()) {
-                try {
-                    CassandraHelper.stopEmbeddedCassandra();
-                }
-                catch (Throwable e) {
-                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
-                }
-            }
-        }
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void primitiveStrategyTest() {
-        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store2 = CacheStoreHelper.createCacheStore("stringTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
-        Collection<CacheEntryImpl<String, String>> strEntries = TestsHelper.generateStringsEntries();
-
-        Collection<Long> fakeLongKeys = TestsHelper.getKeys(longEntries);
-        fakeLongKeys.add(-1L);
-        fakeLongKeys.add(-2L);
-        fakeLongKeys.add(-3L);
-        fakeLongKeys.add(-4L);
-
-        Collection<String> fakeStrKeys = TestsHelper.getKeys(strEntries);
-        fakeStrKeys.add("-1");
-        fakeStrKeys.add("-2");
-        fakeStrKeys.add("-3");
-        fakeStrKeys.add("-4");
-
-        LOGGER.info("Running PRIMITIVE strategy write tests");
-
-        LOGGER.info("Running single write operation tests");
-        store1.write(longEntries.iterator().next());
-        store2.write(strEntries.iterator().next());
-        LOGGER.info("Single write operation tests passed");
-
-        LOGGER.info("Running bulk write operation tests");
-        store1.writeAll(longEntries);
-        store2.writeAll(strEntries);
-        LOGGER.info("Bulk write operation tests passed");
-
-        LOGGER.info("PRIMITIVE strategy write tests passed");
-
-        LOGGER.info("Running PRIMITIVE strategy read tests");
-
-        LOGGER.info("Running single read operation tests");
-
-        LOGGER.info("Running real keys read tests");
-
-        Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());
-        if (!longEntries.iterator().next().getValue().equals(longVal))
-            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
-
-        String strVal = (String)store2.load(strEntries.iterator().next().getKey());
-        if (!strEntries.iterator().next().getValue().equals(strVal))
-            throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Running fake keys read tests");
-
-        longVal = (Long)store1.load(-1L);
-        if (longVal != null)
-            throw new RuntimeException("Long value with fake key '-1' was found in Cassandra");
-
-        strVal = (String)store2.load("-1");
-        if (strVal != null)
-            throw new RuntimeException("String value with fake key '-1' was found in Cassandra");
-
-        LOGGER.info("Single read operation tests passed");
-
-        LOGGER.info("Running bulk read operation tests");
-
-        LOGGER.info("Running real keys read tests");
-
-        Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
-        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
-            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
-
-        Map strValues = store2.loadAll(TestsHelper.getKeys(strEntries));
-        if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
-            throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Running fake keys read tests");
-
-        longValues = store1.loadAll(fakeLongKeys);
-        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
-            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
-
-        strValues = store2.loadAll(fakeStrKeys);
-        if (!TestsHelper.checkCollectionsEqual(strValues, strEntries))
-            throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk read operation tests passed");
-
-        LOGGER.info("PRIMITIVE strategy read tests passed");
-
-        LOGGER.info("Running PRIMITIVE strategy delete tests");
-
-        LOGGER.info("Deleting real keys");
-
-        store1.delete(longEntries.iterator().next().getKey());
-        store1.deleteAll(TestsHelper.getKeys(longEntries));
-
-        store2.delete(strEntries.iterator().next().getKey());
-        store2.deleteAll(TestsHelper.getKeys(strEntries));
-
-        LOGGER.info("Deleting fake keys");
-
-        store1.delete(-1L);
-        store2.delete("-1");
-
-        store1.deleteAll(fakeLongKeys);
-        store2.deleteAll(fakeStrKeys);
-
-        LOGGER.info("PRIMITIVE strategy delete tests passed");
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void blobStrategyTest() {
-        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
-        Collection<CacheEntryImpl<Long, Person>> personEntries = TestsHelper.generateLongsPersonsEntries();
-
-        LOGGER.info("Running BLOB strategy write tests");
-
-        LOGGER.info("Running single write operation tests");
-        store1.write(longEntries.iterator().next());
-        store2.write(personEntries.iterator().next());
-        store3.write(personEntries.iterator().next());
-        LOGGER.info("Single write operation tests passed");
-
-        LOGGER.info("Running bulk write operation tests");
-        store1.writeAll(longEntries);
-        store2.writeAll(personEntries);
-        store3.writeAll(personEntries);
-        LOGGER.info("Bulk write operation tests passed");
-
-        LOGGER.info("BLOB strategy write tests passed");
-
-        LOGGER.info("Running BLOB strategy read tests");
-
-        LOGGER.info("Running single read operation tests");
-
-        Long longVal = (Long)store1.load(longEntries.iterator().next().getKey());
-        if (!longEntries.iterator().next().getValue().equals(longVal))
-            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
-
-        Person personVal = (Person)store2.load(personEntries.iterator().next().getKey());
-        if (!personEntries.iterator().next().getValue().equals(personVal))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        personVal = (Person)store3.load(personEntries.iterator().next().getKey());
-        if (!personEntries.iterator().next().getValue().equals(personVal))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Single read operation tests passed");
-
-        LOGGER.info("Running bulk read operation tests");
-
-        Map longValues = store1.loadAll(TestsHelper.getKeys(longEntries));
-        if (!TestsHelper.checkCollectionsEqual(longValues, longEntries))
-            throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
-
-        Map personValues = store2.loadAll(TestsHelper.getKeys(personEntries));
-        if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        personValues = store3.loadAll(TestsHelper.getKeys(personEntries));
-        if (!TestsHelper.checkPersonCollectionsEqual(personValues, personEntries, false))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk read operation tests passed");
-
-        LOGGER.info("BLOB strategy read tests passed");
-
-        LOGGER.info("Running BLOB strategy delete tests");
-
-        store1.delete(longEntries.iterator().next().getKey());
-        store1.deleteAll(TestsHelper.getKeys(longEntries));
-
-        store2.delete(personEntries.iterator().next().getKey());
-        store2.deleteAll(TestsHelper.getKeys(personEntries));
-
-        store3.delete(personEntries.iterator().next().getKey());
-        store3.deleteAll(TestsHelper.getKeys(personEntries));
-
-        LOGGER.info("BLOB strategy delete tests passed");
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void pojoStrategyTest() {
-        CacheStore store1 = CacheStoreHelper.createCacheStore("longTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store2 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store3 = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore store4 = CacheStoreHelper.createCacheStore("persons",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore productStore = CacheStoreHelper.createCacheStore("product",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore orderStore = CacheStoreHelper.createCacheStore("order",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<Long, Person>> entries1 = TestsHelper.generateLongsPersonsEntries();
-        Collection<CacheEntryImpl<PersonId, Person>> entries2 = TestsHelper.generatePersonIdsPersonsEntries();
-        Collection<CacheEntryImpl<PersonId, Person>> entries3 = TestsHelper.generatePersonIdsPersonsEntries();
-        Collection<CacheEntryImpl<Long, Product>> productEntries = TestsHelper.generateProductEntries();
-        Collection<CacheEntryImpl<Long, ProductOrder>> orderEntries = TestsHelper.generateOrderEntries();
-
-        LOGGER.info("Running POJO strategy write tests");
-
-        LOGGER.info("Running single write operation tests");
-        store1.write(entries1.iterator().next());
-        store2.write(entries2.iterator().next());
-        store3.write(entries3.iterator().next());
-        store4.write(entries3.iterator().next());
-        productStore.write(productEntries.iterator().next());
-        orderStore.write(orderEntries.iterator().next());
-        LOGGER.info("Single write operation tests passed");
-
-        LOGGER.info("Running bulk write operation tests");
-        store1.writeAll(entries1);
-        store2.writeAll(entries2);
-        store3.writeAll(entries3);
-        store4.writeAll(entries3);
-        productStore.writeAll(productEntries);
-        orderStore.writeAll(orderEntries);
-        LOGGER.info("Bulk write operation tests passed");
-
-        LOGGER.info("POJO strategy write tests passed");
-
-        LOGGER.info("Running POJO strategy read tests");
-
-        LOGGER.info("Running single read operation tests");
-
-        Person person = (Person)store1.load(entries1.iterator().next().getKey());
-        if (!entries1.iterator().next().getValue().equalsPrimitiveFields(person))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        person = (Person)store2.load(entries2.iterator().next().getKey());
-        if (!entries2.iterator().next().getValue().equalsPrimitiveFields(person))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        person = (Person)store3.load(entries3.iterator().next().getKey());
-        if (!entries3.iterator().next().getValue().equals(person))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        person = (Person)store4.load(entries3.iterator().next().getKey());
-        if (!entries3.iterator().next().getValue().equals(person))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        Product product = (Product)productStore.load(productEntries.iterator().next().getKey());
-        if (!productEntries.iterator().next().getValue().equals(product))
-            throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");
-
-        ProductOrder order = (ProductOrder)orderStore.load(orderEntries.iterator().next().getKey());
-        if (!orderEntries.iterator().next().getValue().equals(order))
-            throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Single read operation tests passed");
-
-        LOGGER.info("Running bulk read operation tests");
-
-        Map persons = store1.loadAll(TestsHelper.getKeys(entries1));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries1, true))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        persons = store2.loadAll(TestsHelper.getKeys(entries2));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries2, true))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        persons = store3.loadAll(TestsHelper.getKeys(entries3));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        persons = store4.loadAll(TestsHelper.getKeys(entries3));
-        if (!TestsHelper.checkPersonCollectionsEqual(persons, entries3, false))
-            throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
-
-        Map products = productStore.loadAll(TestsHelper.getKeys(productEntries));
-        if (!TestsHelper.checkProductCollectionsEqual(products, productEntries))
-            throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");
-
-        Map orders = orderStore.loadAll(TestsHelper.getKeys(orderEntries));
-        if (!TestsHelper.checkOrderCollectionsEqual(orders, orderEntries))
-            throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk read operation tests passed");
-
-        LOGGER.info("POJO strategy read tests passed");
-
-        LOGGER.info("Running POJO strategy delete tests");
-
-        store1.delete(entries1.iterator().next().getKey());
-        store1.deleteAll(TestsHelper.getKeys(entries1));
-
-        store2.delete(entries2.iterator().next().getKey());
-        store2.deleteAll(TestsHelper.getKeys(entries2));
-
-        store3.delete(entries3.iterator().next().getKey());
-        store3.deleteAll(TestsHelper.getKeys(entries3));
-
-        store4.delete(entries3.iterator().next().getKey());
-        store4.deleteAll(TestsHelper.getKeys(entries3));
-
-        productStore.delete(productEntries.iterator().next().getKey());
-        productStore.deleteAll(TestsHelper.getKeys(productEntries));
-
-        orderStore.delete(orderEntries.iterator().next().getKey());
-        orderStore.deleteAll(TestsHelper.getKeys(orderEntries));
-
-        LOGGER.info("POJO strategy delete tests passed");
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void pojoStrategySimpleObjectsTest() {
-        CacheStore store5 = CacheStoreHelper.createCacheStore("persons5",
-                new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml"),
-                CassandraHelper.getAdminDataSrc());
-
-        CacheStore store6 = CacheStoreHelper.createCacheStore("persons6",
-                new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml"),
-                CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<SimplePersonId, SimplePerson>> entries5 = TestsHelper.generateSimplePersonIdsPersonsEntries();
-        Collection<CacheEntryImpl<SimplePersonId, SimplePerson>> entries6 = TestsHelper.generateSimplePersonIdsPersonsEntries();
-
-        LOGGER.info("Running POJO strategy write tests for simple objects");
-
-        LOGGER.info("Running single write operation tests");
-        store5.write(entries5.iterator().next());
-        store6.write(entries6.iterator().next());
-        LOGGER.info("Single write operation tests passed");
-
-        LOGGER.info("Running bulk write operation tests");
-        store5.writeAll(entries5);
-        store6.writeAll(entries6);
-        LOGGER.info("Bulk write operation tests passed");
-
-        LOGGER.info("POJO strategy write tests for simple objects passed");
-
-        LOGGER.info("Running POJO simple objects strategy read tests");
-
-        LOGGER.info("Running single read operation tests");
-
-        SimplePerson person = (SimplePerson)store5.load(entries5.iterator().next().getKey());
-        if (!entries5.iterator().next().getValue().equalsPrimitiveFields(person))
-            throw new RuntimeException("SimplePerson values were incorrectly deserialized from Cassandra");
-
-        person = (SimplePerson)store6.load(entries6.iterator().next().getKey());
-        if (!entries6.iterator().next().getValue().equalsPrimitiveFields(person))
-            throw new RuntimeException("SimplePerson values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Single read operation tests passed");
-
-        LOGGER.info("Running bulk read operation tests");
-
-        Map persons = store5.loadAll(TestsHelper.getKeys(entries5));
-        if (!TestsHelper.checkSimplePersonCollectionsEqual(persons, entries5, true))
-            throw new RuntimeException("SimplePerson values were incorrectly deserialized from Cassandra");
-
-        persons = store6.loadAll(TestsHelper.getKeys(entries6));
-        if (!TestsHelper.checkSimplePersonCollectionsEqual(persons, entries6, true))
-            throw new RuntimeException("SimplePerson values were incorrectly deserialized from Cassandra");
-
-        LOGGER.info("Bulk read operation tests passed");
-
-        LOGGER.info("POJO strategy read tests for simple objects passed");
-
-        LOGGER.info("Running POJO strategy delete tests for simple objects");
-
-        store5.delete(entries5.iterator().next().getKey());
-        store5.deleteAll(TestsHelper.getKeys(entries5));
-
-        store6.delete(entries6.iterator().next().getKey());
-        store6.deleteAll(TestsHelper.getKeys(entries6));
-
-        LOGGER.info("POJO strategy delete tests for simple objects passed");
-    }
-
-    /** */
-    @Test
-    @SuppressWarnings("unchecked")
-    public void pojoStrategyTransactionTest() {
-        Map<Object, Object> sessionProps = U.newHashMap(1);
-        Transaction sessionTx = new TestTransaction();
-
-        CacheStore productStore = CacheStoreHelper.createCacheStore("product",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"),
-            CassandraHelper.getAdminDataSrc(), new TestCacheSession("product", sessionTx, sessionProps));
-
-        CacheStore orderStore = CacheStoreHelper.createCacheStore("order",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"),
-            CassandraHelper.getAdminDataSrc(), new TestCacheSession("order", sessionTx, sessionProps));
-
-        List<CacheEntryImpl<Long, Product>> productEntries = TestsHelper.generateProductEntries();
-        Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> ordersPerProduct =
-                TestsHelper.generateOrdersPerProductEntries(productEntries, 2);
-
-        Collection<Long> productIds = TestsHelper.getProductIds(productEntries);
-        Collection<Long> orderIds = TestsHelper.getOrderIds(ordersPerProduct);
-
-        LOGGER.info("Running POJO strategy transaction write tests");
-
-        LOGGER.info("Running single write operation tests");
-
-        CassandraHelper.dropTestKeyspaces();
-
-        Product product = productEntries.iterator().next().getValue();
-        ProductOrder order = ordersPerProduct.get(product.getId()).iterator().next().getValue();
-
-        productStore.write(productEntries.iterator().next());
-        orderStore.write(ordersPerProduct.get(product.getId()).iterator().next());
-
-        if (productStore.load(product.getId()) != null || orderStore.load(order.getId()) != null) {
-            throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
-                    "objects were already persisted into Cassandra");
-        }
-
-        Map<Long, Product> products = (Map<Long, Product>)productStore.loadAll(productIds);
-        Map<Long, ProductOrder> orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
-            throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
-                    "objects were already persisted into Cassandra");
-        }
-
-        //noinspection deprecation
-        orderStore.sessionEnd(true);
-        //noinspection deprecation
-        productStore.sessionEnd(true);
-
-        Product product1 = (Product)productStore.load(product.getId());
-        ProductOrder order1 = (ProductOrder)orderStore.load(order.getId());
-
-        if (product1 == null || order1 == null) {
-            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
-                    "no objects were persisted into Cassandra");
-        }
-
-        if (!product.equals(product1) || !order.equals(order1)) {
-            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
-                    "objects were incorrectly persisted/loaded to/from Cassandra");
-        }
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
-            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
-                    "no objects were persisted into Cassandra");
-        }
-
-        if (products.size() > 1 || orders.size() > 1) {
-            throw new RuntimeException("Single write operation test failed. There were committed more objects " +
-                    "into Cassandra than expected");
-        }
-
-        product1 = products.entrySet().iterator().next().getValue();
-        order1 = orders.entrySet().iterator().next().getValue();
-
-        if (!product.equals(product1) || !order.equals(order1)) {
-            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
-                    "objects were incorrectly persisted/loaded to/from Cassandra");
-        }
-
-        LOGGER.info("Single write operation tests passed");
-
-        LOGGER.info("Running bulk write operation tests");
-
-        CassandraHelper.dropTestKeyspaces();
-        sessionProps.clear();
-
-        productStore.writeAll(productEntries);
-
-        for (Long productId : ordersPerProduct.keySet())
-            orderStore.writeAll(ordersPerProduct.get(productId));
-
-        for (Long productId : productIds) {
-            if (productStore.load(productId) != null) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already persisted into Cassandra");
-            }
-        }
-
-        for (Long orderId : orderIds) {
-            if (orderStore.load(orderId) != null) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already persisted into Cassandra");
-            }
-        }
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
-            throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
-                    "objects were already persisted into Cassandra");
-        }
-
-        //noinspection deprecation
-        productStore.sessionEnd(true);
-        //noinspection deprecation
-        orderStore.sessionEnd(true);
-
-        for (CacheEntryImpl<Long, Product> entry : productEntries) {
-            product = (Product)productStore.load(entry.getKey());
-
-            if (!entry.getValue().equals(product)) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                        "not all objects were persisted into Cassandra");
-            }
-        }
-
-        for (Long productId : ordersPerProduct.keySet()) {
-            for (CacheEntryImpl<Long, ProductOrder> entry : ordersPerProduct.get(productId)) {
-                order = (ProductOrder)orderStore.load(entry.getKey());
-
-                if (!entry.getValue().equals(order)) {
-                    throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                            "not all objects were persisted into Cassandra");
-                }
-            }
-        }
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
-            throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                    "no objects were persisted into Cassandra");
-        }
-
-        if (products.size() < productIds.size() || orders.size() < orderIds.size()) {
-            throw new RuntimeException("Bulk write operation test failed. There were committed less objects " +
-                    "into Cassandra than expected");
-        }
-
-        if (products.size() > productIds.size() || orders.size() > orderIds.size()) {
-            throw new RuntimeException("Bulk write operation test failed. There were committed more objects " +
-                    "into Cassandra than expected");
-        }
-
-        for (CacheEntryImpl<Long, Product> entry : productEntries) {
-            product = products.get(entry.getKey());
-
-            if (!entry.getValue().equals(product)) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                        "some objects were incorrectly persisted/loaded to/from Cassandra");
-            }
-        }
-
-        for (Long productId : ordersPerProduct.keySet()) {
-            for (CacheEntryImpl<Long, ProductOrder> entry : ordersPerProduct.get(productId)) {
-                order = orders.get(entry.getKey());
-
-                if (!entry.getValue().equals(order)) {
-                    throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                            "some objects were incorrectly persisted/loaded to/from Cassandra");
-                }
-            }
-        }
-
-        LOGGER.info("Bulk write operation tests passed");
-
-        LOGGER.info("POJO strategy transaction write tests passed");
-
-        LOGGER.info("Running POJO strategy transaction delete tests");
-
-        LOGGER.info("Running single delete tests");
-
-        sessionProps.clear();
-
-        Product deletedProduct = productEntries.remove(0).getValue();
-        ProductOrder deletedOrder = ordersPerProduct.get(deletedProduct.getId()).remove(0).getValue();
-
-        productStore.delete(deletedProduct.getId());
-        orderStore.delete(deletedOrder.getId());
-
-        if (productStore.load(deletedProduct.getId()) == null || orderStore.load(deletedOrder.getId()) == null) {
-            throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " +
-                    "objects were already deleted from Cassandra");
-        }
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if (products.size() != productIds.size() || orders.size() != orderIds.size()) {
-            throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " +
-                    "objects were already deleted from Cassandra");
-        }
-
-        //noinspection deprecation
-        productStore.sessionEnd(true);
-        //noinspection deprecation
-        orderStore.sessionEnd(true);
-
-        if (productStore.load(deletedProduct.getId()) != null || orderStore.load(deletedOrder.getId()) != null) {
-            throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " +
-                    "objects were not deleted from Cassandra");
-        }
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if (products.get(deletedProduct.getId()) != null || orders.get(deletedOrder.getId()) != null) {
-            throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " +
-                    "objects were not deleted from Cassandra");
-        }
-
-        LOGGER.info("Single delete tests passed");
-
-        LOGGER.info("Running bulk delete tests");
-
-        sessionProps.clear();
-
-        productStore.deleteAll(productIds);
-        orderStore.deleteAll(orderIds);
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if (products == null || products.isEmpty() || orders == null || orders.isEmpty()) {
-            throw new RuntimeException("Bulk delete operation test failed. Transaction wasn't committed yet, but " +
-                    "objects were already deleted from Cassandra");
-        }
-
-        //noinspection deprecation
-        orderStore.sessionEnd(true);
-        //noinspection deprecation
-        productStore.sessionEnd(true);
-
-        products = (Map<Long, Product>)productStore.loadAll(productIds);
-        orders = (Map<Long, ProductOrder>)orderStore.loadAll(orderIds);
-
-        if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
-            throw new RuntimeException("Bulk delete operation test failed. Transaction was committed, but " +
-                    "objects were not deleted from Cassandra");
-        }
-
-        LOGGER.info("Bulk delete tests passed");
-
-        LOGGER.info("POJO strategy transaction delete tests passed");
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java
deleted file mode 100644
index a229d955e300c..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraLocalServer.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-/**
- * Simple helper class to run Cassandra on localhost.
- */
-public class CassandraLocalServer {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger(CassandraLocalServer.class.getName());
-
-    /** */
-    public static void main(String[] args) {
-        try {
-            CassandraHelper.startEmbeddedCassandra(LOGGER);
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-        }
-
-        LOGGER.info("Testing admin connection to Cassandra");
-        CassandraHelper.testAdminConnection();
-
-        LOGGER.info("Testing regular connection to Cassandra");
-        CassandraHelper.testRegularConnection();
-
-        LOGGER.info("Dropping all artifacts from previous tests execution session");
-        CassandraHelper.dropTestKeyspaces();
-
-        while (true) {
-            try {
-                System.out.println("Cassandra server running");
-
-                Thread.sleep(10000);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Cassandra server terminated", e);
-            }
-        }
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraSessionImplTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraSessionImplTest.java
deleted file mode 100644
index a3a2bcdb7114e..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/CassandraSessionImplTest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-import com.datastax.driver.core.BoundStatement;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ColumnDefinitions;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.PreparedId;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.ResultSetFuture;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.exceptions.InvalidQueryException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.cache.store.cassandra.session.BatchExecutionAssistant;
-import org.apache.ignite.cache.store.cassandra.session.CassandraSessionImpl;
-import org.apache.ignite.cache.store.cassandra.session.WrappedPreparedStatement;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.nullable;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/** */
-public class CassandraSessionImplTest {
-
-    /** */
-    private PreparedStatement preparedStatement1 = mockPreparedStatement();
-
-    /** */
-    private PreparedStatement preparedStatement2 = mockPreparedStatement();
-
-    /** */
-    private MyBoundStatement1 boundStatement1 = new MyBoundStatement1(preparedStatement1);
-
-    /** */
-    private MyBoundStatement2 boundStatement2 = new MyBoundStatement2(preparedStatement2);
-
-    /** */
-    @SuppressWarnings("unchecked")
-    @Test
-    public void executeFailureTest() {
-        Session session1 = mock(Session.class);
-        Session session2 = mock(Session.class);
-        when(session1.prepare(nullable(String.class))).thenReturn(preparedStatement1);
-        when(session2.prepare(nullable(String.class))).thenReturn(preparedStatement2);
-
-        ResultSetFuture rsFuture = mock(ResultSetFuture.class);
-        ResultSet rs = mock(ResultSet.class);
-        Iterator it = mock(Iterator.class);
-        when(it.hasNext()).thenReturn(true);
-        when(it.next()).thenReturn(mock(Row.class));
-        when(rs.iterator()).thenReturn(it);
-        when(rsFuture.getUninterruptibly()).thenReturn(rs);
-        /* @formatter:off */
-        when(session1.executeAsync(any(Statement.class)))
-            .thenThrow(new InvalidQueryException("You may have used a PreparedStatement that was created with another Cluster instance"))
-            .thenThrow(new RuntimeException("this session should be refreshed / recreated"));
-        when(session2.executeAsync(boundStatement1))
-            .thenThrow(new InvalidQueryException("You may have used a PreparedStatement that was created with another Cluster instance"));
-        when(session2.executeAsync(boundStatement2)).thenReturn(rsFuture);
-        /* @formatter:on */
-
-        Cluster cluster = mock(Cluster.class);
-        when(cluster.connect()).thenReturn(session1).thenReturn(session2);
-        when(session1.getCluster()).thenReturn(cluster);
-        when(session2.getCluster()).thenReturn(cluster);
-
-        Cluster.Builder builder = mock(Cluster.Builder.class);
-        when(builder.build()).thenReturn(cluster);
-
-        CassandraSessionImpl cassandraSession = new CassandraSessionImpl(builder, null,
-                ConsistencyLevel.ONE, ConsistencyLevel.ONE, 0, mock(IgniteLogger.class));
-
-        BatchExecutionAssistant<String, String> batchExecutionAssistant = new MyBatchExecutionAssistant();
-        ArrayList<String> data = new ArrayList<>();
-        for (int i = 0; i < 10; i++) {
-            data.add(String.valueOf(i));
-        }
-        cassandraSession.execute(batchExecutionAssistant, data);
-
-        verify(cluster, times(2)).connect();
-        verify(session1, times(1)).prepare(nullable(String.class));
-        verify(session2, times(1)).prepare(nullable(String.class));
-        assertEquals(10, batchExecutionAssistant.processedCount());
-    }
-
-    /** */
-    private static PreparedStatement mockPreparedStatement() {
-        PreparedStatement ps = mock(PreparedStatement.class);
-        when(ps.getVariables()).thenReturn(mock(ColumnDefinitions.class));
-        when(ps.getPreparedId()).thenReturn(mock(PreparedId.class));
-        when(ps.getQueryString()).thenReturn("insert into xxx");
-        return ps;
-    }
-
-    /** */
-    private class MyBatchExecutionAssistant implements BatchExecutionAssistant {
-        /** */
-        private Set<Integer> processed = new HashSet<>();
-
-        /** {@inheritDoc} */
-        @Override public void process(Row row, int seqNum) {
-            if (processed.contains(seqNum))
-                return;
-
-            processed.add(seqNum);
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean alreadyProcessed(int seqNum) {
-            return processed.contains(seqNum);
-        }
-
-        /** {@inheritDoc} */
-        @Override public int processedCount() {
-            return processed.size();
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean tableExistenceRequired() {
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String getTable() {
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String getStatement() {
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public BoundStatement bindStatement(PreparedStatement statement, Object obj) {
-            if (statement instanceof WrappedPreparedStatement)
-                statement = ((WrappedPreparedStatement)statement).getWrappedStatement();
-
-            if (statement == preparedStatement1) {
-                return boundStatement1;
-            }
-            else if (statement == preparedStatement2) {
-                return boundStatement2;
-            }
-
-            throw new RuntimeException("unexpected");
-        }
-
-        /** {@inheritDoc} */
-        @Override public KeyValuePersistenceSettings getPersistenceSettings() {
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String operationName() {
-            return null;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Object processedData() {
-            return null;
-        }
-
-    }
-
-    /** */
-    private static class MyBoundStatement1 extends BoundStatement {
-        /** */
-        MyBoundStatement1(PreparedStatement ps) {
-            super(ps);
-        }
-
-    }
-
-    /** */
-    private static class MyBoundStatement2 extends BoundStatement {
-        /** */
-        MyBoundStatement2(PreparedStatement ps) {
-            super(ps);
-        }
-    }
-
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
deleted file mode 100644
index 60169e7c82d78..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DDLGeneratorTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.net.URL;
-import org.apache.ignite.cache.store.cassandra.utils.DDLGenerator;
-import org.junit.Test;
-
-/**
- * DDLGenerator test.
- */
-public class DDLGeneratorTest {
-    /** */
-    private static final String[] RESOURCES = new String[] {
-        "org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml",
-        "org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml",
-        "org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml",
-        "org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml",
-        "org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml",
-        "org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml",
-        "org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml",
-        "org/apache/ignite/tests/persistence/pojo/product.xml",
-        "org/apache/ignite/tests/persistence/pojo/order.xml"
-    };
-
-    /**
-     * Test DDL generator.
-     */
-    @Test
-    public void generatorTest() {
-        String[] files = new String[RESOURCES.length];
-
-        ClassLoader clsLdr = DDLGeneratorTest.class.getClassLoader();
-
-        for (int i = 0; i < RESOURCES.length; i++) {
-            URL url = clsLdr.getResource(RESOURCES[i]);
-            if (url == null)
-                throw new IllegalStateException("Failed to find resource: " + RESOURCES[i]);
-
-            files[i] = url.getFile();
-        }
-
-        DDLGenerator.main(files);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java
deleted file mode 100644
index e981dea8c833c..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/DatasourceSerializationTest.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.io.Serializable;
-import java.lang.reflect.Field;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.Host;
-import com.datastax.driver.core.HostDistance;
-import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.policies.LoadBalancingPolicy;
-import com.datastax.driver.core.policies.RoundRobinPolicy;
-import com.datastax.driver.core.policies.TokenAwarePolicy;
-import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer;
-import org.apache.ignite.tests.utils.CassandraAdminCredentials;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for datasource serialization.
- */
-public class DatasourceSerializationTest {
-    /**
-     * Sample class for serialization test.
-     */
-    private static class MyLoadBalancingPolicy implements LoadBalancingPolicy, Serializable {
-        /** */
-        private transient LoadBalancingPolicy plc = new TokenAwarePolicy(new RoundRobinPolicy());
-
-        /** {@inheritDoc} */
-        @Override public void init(Cluster cluster, Collection<Host> hosts) {
-            plc.init(cluster, hosts);
-        }
-
-        /** {@inheritDoc} */
-        @Override public HostDistance distance(Host host) {
-            return plc.distance(host);
-        }
-
-        /** {@inheritDoc} */
-        @Override public Iterator<Host> newQueryPlan(String loggedKeyspace, Statement statement) {
-            return plc.newQueryPlan(loggedKeyspace, statement);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onAdd(Host host) {
-            plc.onAdd(host);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onUp(Host host) {
-            plc.onUp(host);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onDown(Host host) {
-            plc.onDown(host);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void onRemove(Host host) {
-            plc.onRemove(host);
-        }
-
-        /** {@inheritDoc} */
-        @Override public void close() {
-            plc.close();
-        }
-    }
-
-    /**
-     * Serialization test.
-     */
-    @Test
-    public void serializationTest() {
-        DataSource src = new DataSource();
-
-        Credentials cred = new CassandraAdminCredentials();
-        String[] points = new String[]{"127.0.0.1", "10.0.0.2", "10.0.0.3"};
-        LoadBalancingPolicy plc = new MyLoadBalancingPolicy();
-
-        src.setCredentials(cred);
-        src.setContactPoints(points);
-        src.setReadConsistency("ONE");
-        src.setWriteConsistency("QUORUM");
-        src.setLoadBalancingPolicy(plc);
-
-        JavaSerializer serializer = new JavaSerializer();
-
-        ByteBuffer buff = serializer.serialize(src);
-        DataSource _src = (DataSource)serializer.deserialize(buff);
-
-        Credentials _cred = (Credentials)getFieldValue(_src, "creds");
-        List<InetAddress> _points = (List<InetAddress>)getFieldValue(_src, "contactPoints");
-        ConsistencyLevel _readCons = (ConsistencyLevel)getFieldValue(_src, "readConsistency");
-        ConsistencyLevel _writeCons = (ConsistencyLevel)getFieldValue(_src, "writeConsistency");
-        LoadBalancingPolicy _plc = (LoadBalancingPolicy)getFieldValue(_src, "loadBalancingPlc");
-
-        assertTrue("Incorrectly serialized/deserialized credentials for Cassandra DataSource",
-            cred.getPassword().equals(_cred.getPassword()) && cred.getUser().equals(_cred.getUser()));
-
-        assertTrue("Incorrectly serialized/deserialized contact points for Cassandra DataSource",
-            "/127.0.0.1".equals(_points.get(0).toString()) &&
-            "/10.0.0.2".equals(_points.get(1).toString()) &&
-            "/10.0.0.3".equals(_points.get(2).toString()));
-
-        assertTrue("Incorrectly serialized/deserialized consistency levels for Cassandra DataSource",
-            ConsistencyLevel.ONE == _readCons && ConsistencyLevel.QUORUM == _writeCons);
-
-        assertTrue("Incorrectly serialized/deserialized load balancing policy for Cassandra DataSource",
-            _plc instanceof MyLoadBalancingPolicy);
-    }
-
-    /**
-     * @param obj Object.
-     * @param field Field name.
-     * @return Field value.
-     */
-    private Object getFieldValue(Object obj, String field) {
-        try {
-            Field f = obj.getClass().getDeclaredField(field);
-
-            f.setAccessible(true);
-
-            return f.get(obj);
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to get field '" + field + "' value", e);
-        }
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java
deleted file mode 100644
index 9a759bc3c0563..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreLoadTest.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import org.apache.ignite.Ignite;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.tests.load.LoadTestDriver;
-import org.apache.ignite.tests.load.ignite.BulkReadWorker;
-import org.apache.ignite.tests.load.ignite.BulkWriteWorker;
-import org.apache.ignite.tests.load.ignite.ReadWorker;
-import org.apache.ignite.tests.load.ignite.WriteWorker;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-/**
- * Load tests for Ignite caches that utilize {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}
- * to store cache data into Cassandra tables.
- */
-public class IgnitePersistentStoreLoadTest extends LoadTestDriver {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger("IgniteLoadTests");
-
-    /**
-     * Test starter.
-     *
-     * @param args Test arguments.
-     */
-    public static void main(String[] args) {
-        try {
-            LOGGER.info("Ignite load tests execution started");
-
-            LoadTestDriver driver = new IgnitePersistentStoreLoadTest();
-
-            /**
-             * Load test scripts could be executed from several machines. The current implementation correctly
-             * handles the situation when a Cassandra keyspace/table was dropped - for example, by the same load test
-             * started a bit later on another machine. Moreover, there is a warm-up period for each load test.
-             * Thus all the delays related to keyspace/table recreation will not affect the performance metrics,
-             * but lots of "trash" output will be produced in the logs (related to the handling of such
-             * exceptional situations and keyspace/table recreation).
-             *
-             * Thus dropping test keyspaces makes sense only for unit tests, but not for load tests.
-             */
-
-            //CassandraHelper.dropTestKeyspaces();
-
-            driver.runTest("WRITE", WriteWorker.class, WriteWorker.LOGGER_NAME);
-
-            driver.runTest("BULK_WRITE", BulkWriteWorker.class, BulkWriteWorker.LOGGER_NAME);
-
-            driver.runTest("READ", ReadWorker.class, ReadWorker.LOGGER_NAME);
-
-            driver.runTest("BULK_READ", BulkReadWorker.class, BulkReadWorker.LOGGER_NAME);
-
-            /**
-             * A load test script executed on one machine could complete earlier than the same load test executed from
-             * another machine. The current implementation correctly handles the situation when a Cassandra keyspace/table
-             * was dropped (it simply recreates it). But dropping a keyspace/table during load test execution and the
-             * subsequent recreation of such objects can have a SIGNIFICANT EFFECT on the final performance metrics.
-             *
-             * Thus dropping test keyspaces at the end of the tests makes sense only for unit tests,
-             * but not for load tests.
-             */
-
-            //CassandraHelper.dropTestKeyspaces();
-
-            LOGGER.info("Ignite load tests execution completed");
-        }
-        catch (Throwable e) {
-            LOGGER.error("Ignite load tests execution failed", e);
-            throw new RuntimeException("Ignite load tests execution failed", e);
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override protected Logger logger() {
-        return LOGGER;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected Object setup(String logName) {
-        return Ignition.start(TestsHelper.getLoadTestsIgniteConfig());
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void tearDown(Object obj) {
-        Ignite ignite = (Ignite)obj;
-
-        if (ignite != null)
-            ignite.close();
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStorePrimitiveTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStorePrimitiveTest.java
deleted file mode 100644
index 5b2799a2b59c1..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStorePrimitiveTest.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.io.IOException;
-import java.net.URL;
-import com.datastax.driver.core.policies.RoundRobinPolicy;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.tests.utils.CassandraAdminCredentials;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Unit test for Ignite caches that utilize {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}
- * to store primitive type cache data into a Cassandra table.
- */
-public class IgnitePersistentStorePrimitiveTest {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger(IgnitePersistentStorePrimitiveTest.class.getName());
-
-    /** */
-    @BeforeClass
-    public static void setUpClass() {
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.startEmbeddedCassandra(LOGGER);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-            }
-        }
-
-        LOGGER.info("Testing admin connection to Cassandra");
-        CassandraHelper.testAdminConnection();
-
-        LOGGER.info("Testing regular connection to Cassandra");
-        CassandraHelper.testRegularConnection();
-
-        LOGGER.info("Dropping all artifacts from previous tests execution session");
-        CassandraHelper.dropTestKeyspaces();
-
-        LOGGER.info("Start tests execution");
-    }
-
-    /** */
-    @AfterClass
-    public static void tearDownClass() {
-        try {
-            CassandraHelper.dropTestKeyspaces();
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-
-            if (CassandraHelper.useEmbeddedCassandra()) {
-                try {
-                    CassandraHelper.stopEmbeddedCassandra();
-                }
-                catch (Throwable e) {
-                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
-                }
-            }
-        }
-    }
-
-    /** */
-    @Test
-    public void test() throws IOException {
-        IgniteConfiguration config = igniteConfig();
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start(config)) {
-            IgniteCache<Long, Long> cache = ignite.getOrCreateCache("cache1");
-            cache.put(12L, 12L);
-        }
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start(config)) {
-            IgniteCache<Long, Long> cache = ignite.getOrCreateCache("cache1");
-
-            assertEquals(12L, (long)cache.get(12L));
-
-            cache.remove(12L);
-        }
-    }
-
-    /** */
-    private IgniteConfiguration igniteConfig() throws IOException {
-        URL url = getClass().getClassLoader().getResource("org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml");
-        String persistence = U.readFileToString(url.getFile(), "UTF-8");
-        KeyValuePersistenceSettings persistenceSettings = new KeyValuePersistenceSettings(persistence);
-
-        DataSource dataSource = new DataSource();
-        dataSource.setContactPoints(CassandraHelper.getContactPointsArray());
-        dataSource.setCredentials(new CassandraAdminCredentials());
-        dataSource.setLoadBalancingPolicy(new RoundRobinPolicy());
-
-        CassandraCacheStoreFactory<Long, Long> storeFactory = new CassandraCacheStoreFactory<>();
-        storeFactory.setDataSource(dataSource);
-        storeFactory.setPersistenceSettings(persistenceSettings);
-
-        CacheConfiguration<Long, Long> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("cache1");
-        cacheConfiguration.setReadThrough(true);
-        cacheConfiguration.setWriteThrough(true);
-        cacheConfiguration.setCacheStoreFactory(storeFactory);
-
-        IgniteConfiguration config = new IgniteConfiguration();
-        config.setCacheConfiguration(cacheConfiguration);
-
-        return config;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
deleted file mode 100644
index 1aeade57aab11..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/IgnitePersistentStoreTest.java
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.io.IOException;
-import java.net.URL;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Map;
-import com.datastax.driver.core.SimpleStatement;
-import com.datastax.driver.core.policies.RoundRobinPolicy;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.IgniteTransactions;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.binary.BinaryObject;
-import org.apache.ignite.cache.CachePeekMode;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.binary.BinaryMarshaller;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.tests.pojos.Person;
-import org.apache.ignite.tests.pojos.PersonId;
-import org.apache.ignite.tests.pojos.Product;
-import org.apache.ignite.tests.pojos.ProductOrder;
-import org.apache.ignite.tests.pojos.SimplePerson;
-import org.apache.ignite.tests.pojos.SimplePersonId;
-import org.apache.ignite.tests.utils.CacheStoreHelper;
-import org.apache.ignite.tests.utils.CassandraAdminCredentials;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.ignite.transactions.Transaction;
-import org.apache.ignite.transactions.TransactionConcurrency;
-import org.apache.ignite.transactions.TransactionIsolation;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.springframework.core.io.ClassPathResource;
-
-/**
- * Unit tests for Ignite caches that utilize {@link org.apache.ignite.cache.store.cassandra.CassandraCacheStore}
- * to store cache data into Cassandra tables.
- */
-public class IgnitePersistentStoreTest {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger(IgnitePersistentStoreTest.class.getName());
-
-    /** */
-    @BeforeClass
-    public static void setUpClass() {
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.startEmbeddedCassandra(LOGGER);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-            }
-        }
-
-        LOGGER.info("Testing admin connection to Cassandra");
-        CassandraHelper.testAdminConnection();
-
-        LOGGER.info("Testing regular connection to Cassandra");
-        CassandraHelper.testRegularConnection();
-
-        LOGGER.info("Dropping all artifacts from previous tests execution session");
-        CassandraHelper.dropTestKeyspaces();
-
-        LOGGER.info("Start tests execution");
-    }
-
-    /** */
-    @AfterClass
-    public static void tearDownClass() {
-        try {
-            CassandraHelper.dropTestKeyspaces();
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-
-            if (CassandraHelper.useEmbeddedCassandra()) {
-                try {
-                    CassandraHelper.stopEmbeddedCassandra();
-                }
-                catch (Throwable e) {
-                    LOGGER.error("Failed to stop embedded Cassandra instance", e);
-                }
-            }
-        }
-    }
-
-    /** */
-    @Test
-    public void primitiveStrategyTest() {
-        Ignition.stopAll(true);
-
-        Map<Long, Long> longMap = TestsHelper.generateLongsMap();
-        Map<String, String> strMap = TestsHelper.generateStringsMap();
-
-        LOGGER.info("Running PRIMITIVE strategy write tests");
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/primitive/ignite-config.xml")) {
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<String, String> strCache = ignite.getOrCreateCache(new CacheConfiguration<String, String>("cache2"));
-
-            LOGGER.info("Running single operation write tests");
-            longCache.put(1L, 1L);
-            strCache.put("1", "1");
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            longCache.putAll(longMap);
-            strCache.putAll(strMap);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("PRIMITIVE strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/primitive/ignite-config.xml")) {
-            LOGGER.info("Running PRIMITIVE strategy read tests");
-
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<String, String> strCache = ignite.getOrCreateCache(new CacheConfiguration<String, String>("cache2"));
-
-            LOGGER.info("Running single operation read tests");
-
-            Long longVal = longCache.get(1L);
-            if (!longVal.equals(longMap.get(1L)))
-                throw new RuntimeException("Long value was incorrectly deserialized from Cassandra");
-
-            String strVal = strCache.get("1");
-            if (!strVal.equals(strMap.get("1")))
-                throw new RuntimeException("String value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<Long, Long> longMap1 = longCache.getAll(longMap.keySet());
-            if (!TestsHelper.checkMapsEqual(longMap, longMap1))
-                throw new RuntimeException("Long values batch was incorrectly deserialized from Cassandra");
-
-            Map<String, String> strMap1 = strCache.getAll(strMap.keySet());
-            if (!TestsHelper.checkMapsEqual(strMap, strMap1))
-                throw new RuntimeException("String values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("PRIMITIVE strategy read tests passed");
-
-            LOGGER.info("Running PRIMITIVE strategy delete tests");
-
-            longCache.remove(1L);
-            longCache.removeAll(longMap.keySet());
-
-            strCache.remove("1");
-            strCache.removeAll(strMap.keySet());
-
-            LOGGER.info("PRIMITIVE strategy delete tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void blobStrategyTest() {
-        Ignition.stopAll(true);
-
-        Map<Long, Long> longMap = TestsHelper.generateLongsMap();
-        Map<Long, Person> personMap = TestsHelper.generateLongsPersonsMap();
-
-        LOGGER.info("Running BLOB strategy write tests");
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/blob/ignite-config.xml")) {
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<Long, Person> personCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache2"));
-
-            LOGGER.info("Running single operation write tests");
-            longCache.put(1L, 1L);
-            personCache.put(1L, TestsHelper.generateRandomPerson(1L));
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            longCache.putAll(longMap);
-            personCache.putAll(personMap);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("BLOB strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/blob/ignite-config.xml")) {
-            LOGGER.info("Running BLOB strategy read tests");
-
-            IgniteCache<Long, Long> longCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Long>("cache1"));
-            IgniteCache<Long, Person> personCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache2"));
-
-            LOGGER.info("Running single operation read tests");
-
-            Long longVal = longCache.get(1L);
-            if (!longVal.equals(longMap.get(1L)))
-                throw new RuntimeException("Long value was incorrectly deserialized from Cassandra");
-
-            Person person = personCache.get(1L);
-            if (!person.equals(personMap.get(1L)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<Long, Long> longMap1 = longCache.getAll(longMap.keySet());
-            if (!TestsHelper.checkMapsEqual(longMap, longMap1))
-                throw new RuntimeException("Long values batch was incorrectly deserialized from Cassandra");
-
-            Map<Long, Person> personMap1 = personCache.getAll(personMap.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(personMap, personMap1, false))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("BLOB strategy read tests passed");
-
-            LOGGER.info("Running BLOB strategy delete tests");
-
-            longCache.remove(1L);
-            longCache.removeAll(longMap.keySet());
-
-            personCache.remove(1L);
-            personCache.removeAll(personMap.keySet());
-
-            LOGGER.info("BLOB strategy delete tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void blobBinaryLoadCacheTest() {
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml")) {
-            IgniteCache<Long, PojoPerson> personCache = ignite.getOrCreateCache("cache2");
-
-            assert ignite.configuration().getMarshaller() instanceof BinaryMarshaller;
-
-            personCache.put(1L, new PojoPerson(1, "name"));
-
-            assert personCache.withKeepBinary().get(1L) instanceof BinaryObject;
-        }
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml")) {
-            IgniteCache<Long, PojoPerson> personCache = ignite.getOrCreateCache("cache2");
-
-            personCache.loadCache(null, null);
-
-            PojoPerson person = personCache.get(1L);
-
-            LOGGER.info("loadCache tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void pojoStrategyTest() {
-        Ignition.stopAll(true);
-
-        LOGGER.info("Running POJO strategy write tests");
-
-        Map<Long, Person> personMap1 = TestsHelper.generateLongsPersonsMap();
-        Map<PersonId, Person> personMap2 = TestsHelper.generatePersonIdsPersonsMap();
-        Map<Long, Product> productsMap = TestsHelper.generateProductsMap();
-        Map<Long, ProductOrder> ordersMap = TestsHelper.generateOrdersMap();
-
-        Product product = TestsHelper.generateRandomProduct(-1L);
-        ProductOrder order = TestsHelper.generateRandomOrder(-1L);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            IgniteCache<Long, Person> personCache1 = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache1"));
-            IgniteCache<PersonId, Person> personCache2 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache2"));
-            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
-            IgniteCache<PersonId, Person> personCache4 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache4"));
-            IgniteCache<Long, Product> productCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Product>("product"));
-            IgniteCache<Long, ProductOrder> orderCache = ignite.getOrCreateCache(new CacheConfiguration<Long, ProductOrder>("order"));
-
-            LOGGER.info("Running single operation write tests");
-
-            personCache1.put(1L, TestsHelper.generateRandomPerson(1L));
-
-            PersonId id = TestsHelper.generateRandomPersonId();
-            personCache2.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
-
-            id = TestsHelper.generateRandomPersonId();
-            personCache3.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
-            personCache4.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
-
-            productCache.put(product.getId(), product);
-            orderCache.put(order.getId(), order);
-
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            personCache1.putAll(personMap1);
-            personCache2.putAll(personMap2);
-            personCache3.putAll(personMap2);
-            personCache4.putAll(personMap2);
-            productCache.putAll(productsMap);
-            orderCache.putAll(ordersMap);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("POJO strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            LOGGER.info("Running POJO strategy read tests");
-
-            IgniteCache<Long, Person> personCache1 = ignite.getOrCreateCache(new CacheConfiguration<Long, Person>("cache1"));
-            IgniteCache<PersonId, Person> personCache2 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache2"));
-            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));
-            IgniteCache<PersonId, Person> personCache4 = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache4"));
-            IgniteCache<Long, Product> productCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Product>("product"));
-            IgniteCache<Long, ProductOrder> orderCache = ignite.getOrCreateCache(new CacheConfiguration<Long, ProductOrder>("order"));
-
-            LOGGER.info("Running single operation read tests");
-            Person person = personCache1.get(1L);
-            if (!person.equalsPrimitiveFields(personMap1.get(1L)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            PersonId id = personMap2.keySet().iterator().next();
-
-            person = personCache2.get(id);
-            if (!person.equalsPrimitiveFields(personMap2.get(id)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            person = personCache3.get(id);
-            if (!person.equals(personMap2.get(id)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            person = personCache4.get(id);
-            if (!person.equals(personMap2.get(id)))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            Product product1 = productCache.get(product.getId());
-            if (!product.equals(product1))
-                throw new RuntimeException("Product value was incorrectly deserialized from Cassandra");
-
-            ProductOrder order1 = orderCache.get(order.getId());
-            if (!order.equals(order1))
-                throw new RuntimeException("Order value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<Long, Person> persons1 = personCache1.getAll(personMap1.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons1, personMap1, true))
-                throw new RuntimeException("Persons values batch was incorrectly deserialized from Cassandra");
-
-            Map<PersonId, Person> persons2 = personCache2.getAll(personMap2.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons2, personMap2, true))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            Map<PersonId, Person> persons3 = personCache3.getAll(personMap2.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons3, personMap2, false))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            Map<PersonId, Person> persons4 = personCache4.getAll(personMap2.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(persons4, personMap2, false))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            Map<Long, Product> productsMap1 = productCache.getAll(productsMap.keySet());
-            if (!TestsHelper.checkProductMapsEqual(productsMap, productsMap1))
-                throw new RuntimeException("Product values batch was incorrectly deserialized from Cassandra");
-
-            Map<Long, ProductOrder> ordersMap1 = orderCache.getAll(ordersMap.keySet());
-            if (!TestsHelper.checkOrderMapsEqual(ordersMap, ordersMap1))
-                throw new RuntimeException("Order values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("POJO strategy read tests passed");
-
-            LOGGER.info("Running POJO strategy delete tests");
-
-            personCache1.remove(1L);
-            personCache1.removeAll(personMap1.keySet());
-
-            personCache2.remove(id);
-            personCache2.removeAll(personMap2.keySet());
-
-            personCache3.remove(id);
-            personCache3.removeAll(personMap2.keySet());
-
-            personCache4.remove(id);
-            personCache4.removeAll(personMap2.keySet());
-
-            productCache.remove(product.getId());
-            productCache.removeAll(productsMap.keySet());
-
-            orderCache.remove(order.getId());
-            orderCache.removeAll(ordersMap.keySet());
-
-            LOGGER.info("POJO strategy delete tests passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void pojoStrategySimpleObjectsTest() {
-        Ignition.stopAll(true);
-
-        LOGGER.info("Running POJO strategy write tests for simple objects");
-
-        Map<SimplePersonId, SimplePerson> personMap5 = TestsHelper.generateSimplePersonIdsPersonsMap();
-        Map<SimplePersonId, SimplePerson> personMap6 = TestsHelper.generateSimplePersonIdsPersonsMap();
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            IgniteCache<SimplePersonId, SimplePerson> personCache5 =
-                ignite.getOrCreateCache(new CacheConfiguration<SimplePersonId, SimplePerson>("cache5"));
-            IgniteCache<SimplePersonId, SimplePerson> personCache6 =
-                ignite.getOrCreateCache(new CacheConfiguration<SimplePersonId, SimplePerson>("cache6"));
-
-            LOGGER.info("Running single operation write tests");
-
-            SimplePersonId id = TestsHelper.generateRandomSimplePersonId();
-            personCache5.put(id, TestsHelper.generateRandomSimplePerson(id.personNum));
-            personCache6.put(id, TestsHelper.generateRandomSimplePerson(id.personNum));
-
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            personCache5.putAll(personMap5);
-            personCache6.putAll(personMap6);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("POJO strategy write tests for simple objects passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            LOGGER.info("Running POJO strategy read tests for simple objects");
-
-            IgniteCache<SimplePersonId, SimplePerson> personCache5 =
-                ignite.getOrCreateCache(new CacheConfiguration<SimplePersonId, SimplePerson>("cache5"));
-            IgniteCache<SimplePersonId, SimplePerson> personCache6 =
-                ignite.getOrCreateCache(new CacheConfiguration<SimplePersonId, SimplePerson>("cache6"));
-
-            LOGGER.info("Running single operation read tests");
-
-            SimplePersonId id = personMap5.keySet().iterator().next();
-
-            SimplePerson person = personCache5.get(id);
-            if (!person.equalsPrimitiveFields(personMap5.get(id)))
-                throw new RuntimeException("SimplePerson value was incorrectly deserialized from Cassandra");
-
-            id = personMap6.keySet().iterator().next();
-
-            person = personCache6.get(id);
-            if (!person.equals(personMap6.get(id)))
-                throw new RuntimeException("SimplePerson value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<SimplePersonId, SimplePerson> persons5 = personCache5.getAll(personMap5.keySet());
-            if (!TestsHelper.checkSimplePersonMapsEqual(persons5, personMap5, true))
-                throw new RuntimeException("SimplePerson values batch was incorrectly deserialized from Cassandra");
-
-            Map<SimplePersonId, SimplePerson> persons6 = personCache6.getAll(personMap6.keySet());
-            if (!TestsHelper.checkSimplePersonMapsEqual(persons6, personMap6, false))
-                throw new RuntimeException("SimplePerson values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("POJO strategy read tests for simple objects passed");
-
-            LOGGER.info("Running POJO strategy delete tests for simple objects");
-
-            personCache5.remove(id);
-            personCache5.removeAll(personMap5.keySet());
-
-            personCache6.remove(id);
-            personCache6.removeAll(personMap6.keySet());
-
-            LOGGER.info("POJO strategy delete tests for simple objects passed");
-        }
-    }
-
-    /** */
-    @Test
-    public void pojoStrategyTransactionTest() {
-        CassandraHelper.dropTestKeyspaces();
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            pojoStrategyTransactionTest(ignite, TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
-            pojoStrategyTransactionTest(ignite, TransactionConcurrency.OPTIMISTIC, TransactionIsolation.REPEATABLE_READ);
-            pojoStrategyTransactionTest(ignite, TransactionConcurrency.OPTIMISTIC, TransactionIsolation.SERIALIZABLE);
-            pojoStrategyTransactionTest(ignite, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED);
-            pojoStrategyTransactionTest(ignite, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ);
-            pojoStrategyTransactionTest(ignite, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.SERIALIZABLE);
-        }
-    }
-
-    /** */
-    @Test
-    public void loadCacheTest() {
-        Ignition.stopAll(true);
-
-        LOGGER.info("Running loadCache test");
-
-        LOGGER.info("Filling Cassandra table with test data");
-
-        CacheStore store = CacheStoreHelper.createCacheStore("personTypes",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Collection<CacheEntryImpl<PersonId, Person>> entries = TestsHelper.generatePersonIdsPersonsEntries();
-
-        //noinspection unchecked
-        store.writeAll(entries);
-
-        LOGGER.info("Cassandra table filled with test data");
-
-        LOGGER.info("Running loadCache test");
-
-        try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
-            CacheConfiguration<PersonId, Person> ccfg = new CacheConfiguration<>("cache3");
-
-            IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(ccfg);
-
-            int size = personCache3.size(CachePeekMode.ALL);
-
-            LOGGER.info("Initial cache size " + size);
-
-            LOGGER.info("Loading cache data from Cassandra table");
-
-            String qry = "select * from test1.pojo_test3 limit 3";
-
-            personCache3.loadCache(null, qry);
-
-            size = personCache3.size(CachePeekMode.ALL);
-            Assert.assertEquals("Cache data was incorrectly loaded from Cassandra table by '" + qry + "'", 3, size);
-
-            personCache3.clear();
-
-            personCache3.loadCache(null, new SimpleStatement(qry));
-
-            size = personCache3.size(CachePeekMode.ALL);
-            Assert.assertEquals("Cache data was incorrectly loaded from Cassandra table by statement", 3, size);
-
-            personCache3.clear();
-
-            personCache3.loadCache(null);
-
-            size = personCache3.size(CachePeekMode.ALL);
-            Assert.assertEquals("Cache data was incorrectly loaded from Cassandra. " +
-                    "Expected number of records is " + TestsHelper.getBulkOperationSize() +
-                    ", but loaded number of records is " + size,
-                TestsHelper.getBulkOperationSize(), size);
-
-            LOGGER.info("Cache data loaded from Cassandra table");
-        }
-
-        LOGGER.info("loadCache test passed");
-    }
-
-    /** */
-    @SuppressWarnings("unchecked")
-    private void pojoStrategyTransactionTest(Ignite ignite, TransactionConcurrency concurrency,
-                                             TransactionIsolation isolation) {
-        LOGGER.info("-----------------------------------------------------------------------------------");
-        LOGGER.info("Running POJO transaction tests using " + concurrency +
-                " concurrency and " + isolation + " isolation level");
-        LOGGER.info("-----------------------------------------------------------------------------------");
-
-        CacheStore productStore = CacheStoreHelper.createCacheStore("product",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        CacheStore orderStore = CacheStoreHelper.createCacheStore("order",
-            new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"),
-            CassandraHelper.getAdminDataSrc());
-
-        Map<Long, Product> productsMap = TestsHelper.generateProductsMap(5);
-        Map<Long, Product> productsMap1;
-        Map<Long, ProductOrder> ordersMap = TestsHelper.generateOrdersMap(5);
-        Map<Long, ProductOrder> ordersMap1;
-        Product product = TestsHelper.generateRandomProduct(-1L);
-        ProductOrder order = TestsHelper.generateRandomOrder(-1L, -1L, Instant.now());
-
-        IgniteTransactions txs = ignite.transactions();
-
-        IgniteCache<Long, Product> productCache = ignite.getOrCreateCache(new CacheConfiguration<Long, Product>("product"));
-        IgniteCache<Long, ProductOrder> orderCache = ignite.getOrCreateCache(new CacheConfiguration<Long, ProductOrder>("order"));
-
-        LOGGER.info("Running POJO strategy write tests");
-
-        LOGGER.info("Running single operation write tests");
-
-        Transaction tx = txs.txStart(concurrency, isolation);
-
-        try {
-            productCache.put(product.getId(), product);
-            orderCache.put(order.getId(), order);
-
-            if (productStore.load(product.getId()) != null || orderStore.load(order.getId()) != null) {
-                throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already persisted into Cassandra");
-            }
-
-            Map<Long, Product> products = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
-            Map<Long, ProductOrder> orders = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
-
-            if ((products != null && !products.isEmpty()) || (orders != null && !orders.isEmpty())) {
-                throw new RuntimeException("Single write operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already persisted into Cassandra");
-            }
-
-            tx.commit();
-        }
-        finally {
-            U.closeQuiet(tx);
-        }
-
-        Product product1 = (Product)productStore.load(product.getId());
-        ProductOrder order1 = (ProductOrder)orderStore.load(order.getId());
-
-        if (product1 == null || order1 == null) {
-            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
-                    "no objects were persisted into Cassandra");
-        }
-
-        if (!product.equals(product1) || !order.equals(order1)) {
-            throw new RuntimeException("Single write operation test failed. Transaction was committed, but " +
-                    "objects were incorrectly persisted/loaded to/from Cassandra");
-        }
-
-        LOGGER.info("Single operation write tests passed");
-
-        LOGGER.info("Running bulk operation write tests");
-
-        tx = txs.txStart(concurrency, isolation);
-
-        try {
-            productCache.putAll(productsMap);
-            orderCache.putAll(ordersMap);
-
-            productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
-            ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
-
-            if ((productsMap1 != null && !productsMap1.isEmpty()) || (ordersMap1 != null && !ordersMap1.isEmpty())) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already persisted into Cassandra");
-            }
-
-            tx.commit();
-        }
-        finally {
-            U.closeQuiet(tx);
-        }
-
-        productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
-        ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
-
-        if (productsMap1 == null || productsMap1.isEmpty() || ordersMap1 == null || ordersMap1.isEmpty()) {
-            throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                    "no objects were persisted into Cassandra");
-        }
-
-        if (productsMap1.size() < productsMap.size() || ordersMap1.size() < ordersMap.size()) {
-            throw new RuntimeException("Bulk write operation test failed. There were committed less objects " +
-                    "into Cassandra than expected");
-        }
-
-        if (productsMap1.size() > productsMap.size() || ordersMap1.size() > ordersMap.size()) {
-            throw new RuntimeException("Bulk write operation test failed. There were committed more objects " +
-                    "into Cassandra than expected");
-        }
-
-        for (Map.Entry<Long, Product> entry : productsMap.entrySet()) {
-            product = productsMap1.get(entry.getKey());
-
-            if (!entry.getValue().equals(product)) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                        "some objects were incorrectly persisted/loaded to/from Cassandra");
-            }
-        }
-
-        for (Map.Entry<Long, ProductOrder> entry : ordersMap.entrySet()) {
-            order = ordersMap1.get(entry.getKey());
-
-            if (!entry.getValue().equals(order)) {
-                throw new RuntimeException("Bulk write operation test failed. Transaction was committed, but " +
-                        "some objects were incorrectly persisted/loaded to/from Cassandra");
-            }
-        }
-
-        LOGGER.info("Bulk operation write tests passed");
-
-        LOGGER.info("POJO strategy write tests passed");
-
-        LOGGER.info("Running POJO strategy delete tests");
-
-        LOGGER.info("Running single delete tests");
-
-        tx = txs.txStart(concurrency, isolation);
-
-        try {
-            productCache.remove(-1L);
-            orderCache.remove(-1L);
-
-            if (productStore.load(-1L) == null || orderStore.load(-1L) == null) {
-                throw new RuntimeException("Single delete operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already deleted from Cassandra");
-            }
-
-            tx.commit();
-        }
-        finally {
-            U.closeQuiet(tx);
-        }
-
-        if (productStore.load(-1L) != null || orderStore.load(-1L) != null) {
-            throw new RuntimeException("Single delete operation test failed. Transaction was committed, but " +
-                    "objects were not deleted from Cassandra");
-        }
-
-        LOGGER.info("Single delete tests passed");
-
-        LOGGER.info("Running bulk delete tests");
-
-        tx = txs.txStart(concurrency, isolation);
-
-        try {
-            productCache.removeAll(productsMap.keySet());
-            orderCache.removeAll(ordersMap.keySet());
-
-            productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
-            ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
-
-            if (productsMap1.size() != productsMap.size() || ordersMap1.size() != ordersMap.size()) {
-                throw new RuntimeException("Bulk delete operation test failed. Transaction wasn't committed yet, but " +
-                        "objects were already deleted from Cassandra");
-            }
-
-            tx.commit();
-        }
-        finally {
-            U.closeQuiet(tx);
-        }
-
-        productsMap1 = (Map<Long, Product>)productStore.loadAll(productsMap.keySet());
-        ordersMap1 = (Map<Long, ProductOrder>)orderStore.loadAll(ordersMap.keySet());
-
-        if ((productsMap1 != null && !productsMap1.isEmpty()) || (ordersMap1 != null && !ordersMap1.isEmpty())) {
-            throw new RuntimeException("Bulk delete operation test failed. Transaction was committed, but " +
-                    "objects were not deleted from Cassandra");
-        }
-
-        LOGGER.info("Bulk delete tests passed");
-
-        LOGGER.info("POJO strategy delete tests passed");
-
-        LOGGER.info("-----------------------------------------------------------------------------------");
-        LOGGER.info("Passed POJO transaction tests for " + concurrency +
-                " concurrency and " + isolation + " isolation level");
-        LOGGER.info("-----------------------------------------------------------------------------------");
-    }
-
-    /**
-     * KeyValuePersistenceSettings is passed directly, not as a bean, and should be
-     * serialized and deserialized correctly.
-     */
-    @Test
-    public void directPersistenceConfigTest() throws IOException {
-        Map<PersonId, Person> personMap = TestsHelper.generatePersonIdsPersonsMap();
-        PersonId id = TestsHelper.generateRandomPersonId();
-        Person person = TestsHelper.generateRandomPerson(id.getPersonNumber());
-
-        IgniteConfiguration config = igniteConfig();
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start(config)) {
-            LOGGER.info("Running POJO strategy write tests");
-            IgniteCache<PersonId, Person> cache = ignite.getOrCreateCache("cache1");
-
-            LOGGER.info("Running single operation write tests");
-            cache.put(id, TestsHelper.generateRandomPerson(id.getPersonNumber()));
-            cache.put(id, person);
-            LOGGER.info("Single operation write tests passed");
-
-            LOGGER.info("Running bulk operation write tests");
-            cache.putAll(personMap);
-            LOGGER.info("Bulk operation write tests passed");
-        }
-
-        LOGGER.info("POJO strategy write tests passed");
-
-        Ignition.stopAll(true);
-
-        try (Ignite ignite = Ignition.start(config)) {
-            LOGGER.info("Running POJO strategy read tests");
-            IgniteCache<PersonId, Person> cache = ignite.getOrCreateCache("cache1");
-
-            Person actualPerson = cache.get(id);
-            if (!person.equals(actualPerson))
-                throw new RuntimeException("Person value was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Single operation read tests passed");
-
-            LOGGER.info("Running bulk operation read tests");
-
-            Map<PersonId, Person> actualPersonMap = cache.getAll(personMap.keySet());
-            if (!TestsHelper.checkPersonMapsEqual(actualPersonMap, personMap, true))
-                throw new RuntimeException("Person values batch was incorrectly deserialized from Cassandra");
-
-            LOGGER.info("Bulk operation read tests passed");
-
-            LOGGER.info("POJO strategy read tests passed");
-
-            LOGGER.info("Running POJO strategy delete tests");
-
-            cache.remove(id);
-            cache.removeAll(personMap.keySet());
-
-            LOGGER.info("POJO strategy delete tests passed");
-        }
-    }
-
-    /** */
-    private IgniteConfiguration igniteConfig() throws IOException {
-        URL url = getClass().getClassLoader().getResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml");
-        String persistence = U.readFileToString(url.getFile(), "UTF-8");
-
-        KeyValuePersistenceSettings persistenceSettings = new KeyValuePersistenceSettings(persistence);
-
-        DataSource dataSource = new DataSource();
-        dataSource.setContactPoints(CassandraHelper.getContactPointsArray());
-        dataSource.setCredentials(new CassandraAdminCredentials());
-        dataSource.setLoadBalancingPolicy(new RoundRobinPolicy());
-
-        CassandraCacheStoreFactory<String, Person> storeFactory = new CassandraCacheStoreFactory<>();
-        storeFactory.setDataSource(dataSource);
-        storeFactory.setPersistenceSettings(persistenceSettings);
-
-        CacheConfiguration<String, Person> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("cache1");
-        cacheConfiguration.setReadThrough(true);
-        cacheConfiguration.setWriteThrough(true);
-        cacheConfiguration.setCacheStoreFactory(storeFactory);
-
-        IgniteConfiguration config = new IgniteConfiguration();
-        config.setCacheConfiguration(cacheConfiguration);
-
-        return config;
-    }
-
-    /** */
-    public static class PojoPerson {
-        /** */
-        private int id;
-
-        /** */
-        private String name;
-
-        /** */
-        public PojoPerson() {
-            // No-op.
-        }
-
-        /** */
-        public PojoPerson(int id, String name) {
-            this.id = id;
-            this.name = name;
-        }
-
-        /** */
-        public int getId() {
-            return id;
-        }
-
-        /** */
-        public String getName() {
-            return name;
-        }
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java
deleted file mode 100644
index 42cfd9d4b6185..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/LoadTestsCassandraArtifactsCreator.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests;
-
-import java.util.LinkedList;
-import java.util.List;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-
-/**
- * Recreates all required Cassandra database objects (keyspace, table, indexes) for load tests
- */
-public class LoadTestsCassandraArtifactsCreator {
-    /**
-     * Recreates Cassandra artifacts required for load tests
-     * @param args not used
-     */
-    public static void main(String[] args) {
-        try {
-            System.out.println("[INFO] Recreating Cassandra artifacts (keyspace, table, indexes) for load tests");
-
-            KeyValuePersistenceSettings perSettings =
-                    new KeyValuePersistenceSettings(TestsHelper.getLoadTestsPersistenceSettings());
-
-            System.out.println("[INFO] Dropping test keyspace: " + perSettings.getKeyspace());
-
-            try {
-                CassandraHelper.dropTestKeyspaces();
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to drop test keyspace: " + perSettings.getKeyspace(), e);
-            }
-
-            System.out.println("[INFO] Test keyspace '" + perSettings.getKeyspace() + "' was successfully dropped");
-
-            System.out.println("[INFO] Creating test keyspace: " + perSettings.getKeyspace());
-
-            try {
-                CassandraHelper.executeWithAdminCredentials(perSettings.getKeyspaceDDLStatement());
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to create test keyspace: " + perSettings.getKeyspace(), e);
-            }
-
-            System.out.println("[INFO] Test keyspace '" + perSettings.getKeyspace() + "' was successfully created");
-
-            System.out.println("[INFO] Creating test table: " + perSettings.getTable());
-
-            try {
-                CassandraHelper.executeWithAdminCredentials(perSettings.getTableDDLStatement(perSettings.getTable()));
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to create test table: " + perSettings.getTable(), e);
-            }
-
-            System.out.println("[INFO] Test table '" + perSettings.getTable() + "' was successfully created");
-
-            List<String> statements = perSettings.getIndexDDLStatements(perSettings.getTable());
-            if (statements == null)
-                statements = new LinkedList<>();
-
-            for (String statement : statements) {
-                System.out.println("[INFO] Creating test table index:");
-                System.out.println(statement);
-
-                try {
-                    CassandraHelper.executeWithAdminCredentials(statement);
-                }
-                catch (Throwable e) {
-                    throw new RuntimeException("Failed to create test table index", e);
-                }
-
-                System.out.println("[INFO] Test table index was successfully created");
-            }
-
-            System.out.println("[INFO] All required Cassandra artifacts were successfully recreated");
-        }
-        catch (Throwable e) {
-            System.out.println("[ERROR] Failed to recreate Cassandra artifacts");
-            e.printStackTrace(System.out);
-
-            if (e instanceof RuntimeException)
-                throw (RuntimeException)e;
-            else
-                throw new RuntimeException(e);
-        }
-        finally {
-            CassandraHelper.releaseCassandraResources();
-        }
-    }
-}
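
A load-test run would typically recreate these artifacts first and only then start the drivers. A minimal sketch of such a bootstrap (the class name is hypothetical, not part of this module) could look like the following; it simply delegates to the creator's main() and therefore relies on the same test resources that class reads:

package org.apache.ignite.tests;

/**
 * Hypothetical bootstrap: recreates the Cassandra keyspace, table and indexes
 * by delegating to LoadTestsCassandraArtifactsCreator before any load test
 * driver is started. Relies on the same test settings that class reads.
 */
public class LoadTestsBootstrap {
    /** */
    public static void main(String[] args) {
        LoadTestsCassandraArtifactsCreator.main(new String[0]);

        // ...start load test drivers here...
    }
}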
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Generator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Generator.java
deleted file mode 100644
index 0c18bc0e2a43c..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Generator.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-/**
- * Generator abstraction used by tests to generate the next key/value pair for an Ignite cache
- * from the provided long counter (which is sequentially incremented in the load test driver loop).
- */
-public interface Generator {
-    /** */
-    public Object generate(long i);
-}
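
As an illustration of how this interface is meant to be used, here is a minimal sketch of a custom generator (the class name and key shape are hypothetical, not part of the module):

package org.apache.ignite.tests.load;

import java.util.UUID;

/**
 * Hypothetical Generator that derives a deterministic UUID-shaped string key
 * from the counter supplied by the load test driver loop. Determinism matters:
 * read workers must be able to regenerate exactly the keys written earlier.
 */
public class UuidStringGenerator implements Generator {
    /** {@inheritDoc} */
    @Override public Object generate(long i) {
        return new UUID(i, i).toString();
    }
}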
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
deleted file mode 100644
index 21490f6cce699..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/IntGenerator.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-/**
- * Implementation of {@link org.apache.ignite.tests.load.Generator} generating {@link Integer} instances.
- */
-public class IntGenerator implements Generator {
-    /** {@inheritDoc} */
-    @Override public Object generate(long i) {
-        long val = i / 10000;
-
-        while (val > Integer.MAX_VALUE)
-            val /= 2;
-
-        return (int)val;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
deleted file mode 100644
index a244da1f8bde7..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LoadTestDriver.java
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-import java.lang.reflect.Constructor;
-import java.util.LinkedList;
-import java.util.List;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.cache.store.cassandra.common.SystemHelper;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.logging.log4j.Logger;
-
-/**
- * Basic load test driver to be inherited by specific implementations for particular use cases.
- */
-public abstract class LoadTestDriver {
-    /** Number of attempts to setup load test */
-    private static final int NUMBER_OF_SETUP_ATTEMPTS = 10;
-
-    /** Timeout in milliseconds between load test setup attempts */
-    private static final int SETUP_ATTEMPT_TIMEOUT = 1000;
-
-    /** */
-    public void runTest(String testName, Class<? extends Worker> clazz, String logName) {
-        logger().info("Running " + testName + " test");
-
-        Object cfg = null;
-
-        int attempt;
-
-        logger().info("Setting up load tests driver");
-
-        for (attempt = 0; attempt < NUMBER_OF_SETUP_ATTEMPTS; attempt++) {
-            try {
-                cfg = setup(logName);
-                break;
-            }
-            catch (Throwable e) {
-                logger().error((attempt + 1) + " attempt to setup load test '" + testName + "' failed", e);
-            }
-
-            if (attempt + 1 != NUMBER_OF_SETUP_ATTEMPTS) {
-                logger().info("Sleeping for " + SETUP_ATTEMPT_TIMEOUT + " seconds before trying next attempt " +
-                        "to setup '" + testName + "' load test");
-
-                try {
-                    Thread.sleep(SETUP_ATTEMPT_TIMEOUT);
-                }
-                catch (InterruptedException ignored) {
-                    // No-op.
-                }
-            }
-        }
-
-        if (cfg == null && attempt == NUMBER_OF_SETUP_ATTEMPTS) {
-            throw new RuntimeException("All " + NUMBER_OF_SETUP_ATTEMPTS + " attempts to setup load test '" +
-                    testName + "' have failed");
-        }
-
-        // calculates host unique prefix based on its subnet IP address
-        long hostUniquePrefix = getHostUniquePrefix();
-
-        logger().info("Load tests driver setup successfully completed");
-
-        try {
-
-            List<Worker> workers = new LinkedList<>();
-            long startPosition = 0;
-
-            logger().info("Starting workers");
-
-            for (int i = 0; i < TestsHelper.getLoadTestsThreadsCount(); i++) {
-                Worker worker = createWorker(clazz, cfg,
-                    hostUniquePrefix + startPosition,
-                    hostUniquePrefix + startPosition + 100000000);
-                workers.add(worker);
-                worker.setName(testName + "-worker-" + i);
-                worker.start();
-                startPosition += 100000001;
-            }
-
-            logger().info("Workers started");
-            logger().info("Waiting for workers to complete");
-
-            List<String> failedWorkers = new LinkedList<>();
-
-            for (Worker worker : workers) {
-                boolean failed = false;
-
-                try {
-                    worker.join();
-                }
-                catch (Throwable e) {
-                    logger().error("Worker " + worker.getName() + " waiting interrupted", e);
-                    failed = true;
-                }
-
-                if (failed || worker.isFailed()) {
-                    failedWorkers.add(worker.getName());
-                    logger().info("Worker " + worker.getName() + " execution failed");
-                }
-                else
-                    logger().info("Worker " + worker.getName() + " successfully completed");
-            }
-
-            printTestResultsHeader(testName, failedWorkers);
-            printTestResultsStatistics(testName, workers);
-        }
-        finally {
-            tearDown(cfg);
-        }
-    }
-
-    /** */
-    protected abstract Logger logger();
-
-    /** */
-    protected abstract Object setup(String logName);
-
-    /** */
-    protected void tearDown(Object obj) {
-    }
-
-    /** */
-    @SuppressWarnings("unchecked")
-    private Worker createWorker(Class clazz, Object cfg, long startPosition, long endPosition) {
-        try {
-            Class cfgCls = cfg instanceof Ignite ? Ignite.class : CacheStore.class;
-
-            Constructor ctor = clazz.getConstructor(cfgCls, long.class, long.class);
-
-            return (Worker)ctor.newInstance(cfg, startPosition, endPosition);
-        }
-        catch (Throwable e) {
-            logger().error("Failed to instantiate worker of class '" + clazz.getName() + "'", e);
-            throw new RuntimeException("Failed to instantiate worker of class '" + clazz.getName() + "'", e);
-        }
-    }
-
-    /** */
-    private void printTestResultsHeader(String testName, List<String> failedWorkers) {
-        if (failedWorkers.isEmpty()) {
-            logger().info(testName + " test execution successfully completed.");
-            return;
-        }
-
-        if (failedWorkers.size() == TestsHelper.getLoadTestsThreadsCount()) {
-            logger().error(testName + " test execution totally failed.");
-            return;
-        }
-
-        String strFailedWorkers = "";
-
-        for (String workerName : failedWorkers) {
-            if (!strFailedWorkers.isEmpty())
-                strFailedWorkers += ", ";
-
-            strFailedWorkers += workerName;
-        }
-
-        logger().warn(testName + " test execution completed, but " + failedWorkers.size() + " of " +
-            TestsHelper.getLoadTestsThreadsCount() + " workers failed. Failed workers: " + strFailedWorkers);
-    }
-
-    /** */
-    @SuppressWarnings("StringBufferReplaceableByString")
-    private void printTestResultsStatistics(String testName, List<Worker> workers) {
-        long cnt = 0;
-        long errCnt = 0;
-        long speed = 0;
-
-        for (Worker worker : workers) {
-            cnt += worker.getMsgProcessed();
-            errCnt += worker.getErrorsCount();
-            speed += worker.getSpeed();
-        }
-
-        float errPercent = errCnt == 0 ?
-            0 :
-            cnt + errCnt == 0 ? 0 : (float)(errCnt * 100 ) / (float)(cnt + errCnt);
-
-        StringBuilder builder = new StringBuilder();
-        builder.append(SystemHelper.LINE_SEPARATOR);
-        builder.append("-------------------------------------------------");
-        builder.append(SystemHelper.LINE_SEPARATOR);
-        builder.append(testName).append(" test statistics").append(SystemHelper.LINE_SEPARATOR);
-        builder.append(testName).append(" messages: ").append(cnt).append(SystemHelper.LINE_SEPARATOR);
-        builder.append(testName).append(" errors: ").append(errCnt).append(", ").
-                append(String.format("%.2f", errPercent).replace(",", ".")).
-                append("%").append(SystemHelper.LINE_SEPARATOR);
-        builder.append(testName).append(" speed: ").append(speed).append(" msg/sec").append(SystemHelper.LINE_SEPARATOR);
-        builder.append("-------------------------------------------------");
-
-        logger().info(builder.toString());
-    }
-
-    /** */
-    private long getHostUniquePrefix() {
-        String[] parts = SystemHelper.HOST_IP.split("\\.");
-
-        if (parts[2].equals("0"))
-            parts[2] = "777";
-
-        if (parts[3].equals("0"))
-            parts[3] = "777";
-
-        long part3 = Long.parseLong(parts[2]);
-        long part4 = Long.parseLong(parts[3]);
-
-        if (part3 < 10)
-            part3 *= 100;
-        else if (part3 < 100)
-            part3 *= 10;
-
-        if (part4 < 10)
-            part4 *= 100;
-        else if (part4 < 100)
-            part4 *= 10;
-
-        return (part4 * 100000000000000L) + (part3 * 100000000000L) + Thread.currentThread().getId();
-    }
-}
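
To make the extension contract above concrete, here is a minimal sketch of a driver subclass. The class name, logger name and configuration file name are assumptions for illustration only; setup() returns either an Ignite node or a CacheStore, which is what createWorker() uses to pick the worker constructor:

package org.apache.ignite.tests.load;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
 * Hypothetical minimal driver: setup() starts an Ignite node (so workers are
 * created through the (Ignite, long, long) constructor) and tearDown() stops it.
 */
public class ExampleIgniteLoadTestDriver extends LoadTestDriver {
    /** */
    private static final Logger LOGGER = LogManager.getLogger("ExampleIgniteLoadTest");

    /** {@inheritDoc} */
    @Override protected Logger logger() {
        return LOGGER;
    }

    /** {@inheritDoc} */
    @Override protected Object setup(String logName) {
        // logName is ignored in this sketch; a real driver would use it to select a log configuration.
        return Ignition.start("ignite-load-tests-config.xml"); // assumed config file name
    }

    /** {@inheritDoc} */
    @Override protected void tearDown(Object cfg) {
        ((Ignite)cfg).close();
    }

    /** Example entry point: runs the Ignite write load test through this driver. */
    public static void main(String[] args) {
        new ExampleIgniteLoadTestDriver().runTest("WRITE",
            org.apache.ignite.tests.load.ignite.WriteWorker.class, "IgniteWriteLoadTest");
    }
}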
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LongGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LongGenerator.java
deleted file mode 100644
index 0398f98cbe2cf..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/LongGenerator.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-/**
- * Implementation of {@link org.apache.ignite.tests.load.Generator} generating {@link Long} instances.
- */
-public class LongGenerator implements Generator {
-    /** {@inheritDoc} */
-    @Override public Object generate(long i) {
-        return i;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
deleted file mode 100644
index 054c1661ac1f3..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonGenerator.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-import java.util.Date;
-import java.util.LinkedList;
-import java.util.List;
-import org.apache.ignite.tests.pojos.Person;
-
-/**
- * Implementation of {@link Generator} generating {@link Person} instances.
- */
-public class PersonGenerator implements Generator {
-    /** */
-    private static final Date DATE = new Date();
-
-    /** */
-    private static final List<String> PHONES = new LinkedList<String>();
-
-    static {
-        PHONES.add("1234567");
-        PHONES.add("7654321");
-        PHONES.add("1289054");
-    }
-
-    /** {@inheritDoc} */
-    @Override public Object generate(long i) {
-        return new Person(i, Long.toString(i), Long.toString(i), (short)(i % 100), i % 2 == 0, i, i, DATE, PHONES);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java
deleted file mode 100644
index a11e0d81d4318..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/PersonIdGenerator.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-import org.apache.ignite.tests.pojos.PersonId;
-
-/**
- * Implementation of {@link org.apache.ignite.tests.load.Generator} generating
- * {@link org.apache.ignite.tests.pojos.PersonId} instances.
- */
-public class PersonIdGenerator implements Generator {
-    /** {@inheritDoc} */
-    @Override public Object generate(long i) {
-        return new PersonId(Long.toString(i), Long.toString(i), i);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/StringGenerator.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/StringGenerator.java
deleted file mode 100644
index cfaf34ae50def..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/StringGenerator.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-/**
- * Implementation of {@link org.apache.ignite.tests.load.Generator} generating {@link String} instances.
- */
-public class StringGenerator implements Generator {
-    /** {@inheritDoc} */
-    @Override public Object generate(long i) {
-        return Long.toString(i);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java
deleted file mode 100644
index 0aa20c0b2de3a..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/Worker.java
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load;
-
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.cache.store.cassandra.common.SystemHelper;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.internal.util.IgniteUtils;
-import org.apache.ignite.tests.utils.TestsHelper;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-/**
- * Worker thread abstraction to be inherited by specific load test implementations.
- */
-public abstract class Worker extends Thread {
-    /** */
-    private long testStartTime;
-
-    /** */
-    boolean warmup = TestsHelper.getLoadTestsWarmupPeriod() != 0;
-
-    /** */
-    private volatile long warmupStartTime;
-
-    /** */
-    private volatile long warmupFinishTime;
-
-    /** */
-    private volatile long startTime;
-
-    /** */
-    private volatile long finishTime;
-
-    /** */
-    private volatile long warmupMsgProcessed;
-
-    /** */
-    private volatile long warmupSleepCnt;
-
-    /** */
-    private volatile long msgProcessed;
-
-    /** */
-    private volatile long msgFailed;
-
-    /** */
-    private volatile long sleepCnt;
-
-    /** */
-    private Throwable executionError;
-
-    /** */
-    private long statReportedTime;
-
-    /** */
-    private CacheStore cacheStore;
-
-    /** */
-    private Ignite ignite;
-
-    /** */
-    private IgniteCache igniteCache;
-
-    /** */
-    private Logger log;
-
-    /** */
-    private long startPosition;
-
-    /** */
-    private long endPosition;
-
-    /** */
-    public Worker(CacheStore cacheStore, long startPosition, long endPosition) {
-        this.cacheStore = cacheStore;
-        this.log = LogManager.getLogger(loggerName());
-        this.startPosition = startPosition;
-        this.endPosition = endPosition;
-    }
-
-    /** */
-    public Worker(Ignite ignite, long startPosition, long endPosition) {
-        this.ignite = ignite;
-        this.log = LogManager.getLogger(loggerName());
-        this.startPosition = startPosition;
-        this.endPosition = endPosition;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void run() {
-        try {
-            if (ignite != null)
-                igniteCache = ignite.getOrCreateCache(new CacheConfiguration(TestsHelper.getLoadTestsCacheName()));
-
-            execute();
-        }
-        catch (Throwable e) {
-            executionError = e;
-            throw new RuntimeException("Test execution abnormally terminated", e);
-        }
-        finally {
-            reportTestCompletion();
-        }
-    }
-
-    /** */
-    public boolean isFailed() {
-        return executionError != null;
-    }
-
-    /** */
-    public long getSpeed() {
-        if (msgProcessed == 0)
-            return 0;
-
-        long finish = finishTime != 0 ? finishTime : System.currentTimeMillis();
-        long duration = (finish - startTime - sleepCnt * TestsHelper.getLoadTestsRequestsLatency()) / 1000;
-
-        return duration == 0 ? msgProcessed : msgProcessed / duration;
-    }
-
-    /** */
-    public long getErrorsCount() {
-        return msgFailed;
-    }
-
-    /** */
-    public float getErrorsPercent() {
-        if (msgFailed == 0)
-            return 0;
-
-        return msgProcessed + msgFailed == 0 ? 0 : (float)(msgFailed * 100 ) / (float)(msgProcessed + msgFailed);
-    }
-
-    /** */
-    public long getMsgCountTotal() {
-        return warmupMsgProcessed + msgProcessed;
-    }
-
-    /** */
-    public long getWarmupMsgProcessed() {
-        return warmupMsgProcessed;
-    }
-
-    /** */
-    public long getMsgProcessed() {
-        return msgProcessed;
-    }
-
-    /** */
-    protected abstract String loggerName();
-
-    /** */
-    protected abstract boolean batchMode();
-
-    /** */
-    protected void process(CacheStore cacheStore, CacheEntryImpl entry) {
-        throw new UnsupportedOperationException("Single message processing is not supported");
-    }
-
-    /** */
-    protected void process(IgniteCache cache, Object key, Object val) {
-        throw new UnsupportedOperationException("Single message processing is not supported");
-    }
-
-    /** */
-    protected void process(CacheStore cacheStore, Collection<CacheEntryImpl> entries) {
-        throw new UnsupportedOperationException("Batch processing is not supported");
-    }
-
-    /** */
-    protected void process(IgniteCache cache, Map map) {
-        throw new UnsupportedOperationException("Batch processing is not supported");
-    }
-
-    /** */
-    @SuppressWarnings("unchecked")
-    private void execute() throws InterruptedException {
-        testStartTime = System.currentTimeMillis();
-
-        log.info("Test execution started");
-
-        if (warmup)
-            log.info("Warm up period started");
-
-        warmupStartTime = warmup ? testStartTime : 0;
-        startTime = !warmup ? testStartTime : 0;
-
-        statReportedTime = testStartTime;
-
-        long cntr = startPosition;
-        Object key = TestsHelper.generateLoadTestsKey(cntr);
-        Object val = TestsHelper.generateLoadTestsValue(cntr);
-        List<CacheEntryImpl> batchList = new ArrayList<>(TestsHelper.getBulkOperationSize());
-        Map batchMap = new HashMap(TestsHelper.getBulkOperationSize());
-
-        int execTime = TestsHelper.getLoadTestsWarmupPeriod() + TestsHelper.getLoadTestsExecutionTime();
-
-        try {
-            while (true) {
-                if (System.currentTimeMillis() - testStartTime > execTime)
-                    break;
-
-                if (warmup && System.currentTimeMillis() - testStartTime > TestsHelper.getLoadTestsWarmupPeriod()) {
-                    warmupFinishTime = System.currentTimeMillis();
-                    startTime = warmupFinishTime;
-                    statReportedTime = warmupFinishTime;
-                    warmup = false;
-                    log.info("Warm up period completed");
-                }
-
-                if (!batchMode()) {
-                    if (cacheStore != null)
-                        doWork(new CacheEntryImpl(key, val));
-                    else
-                        doWork(key, val);
-                }
-                else if (batchList.size() == TestsHelper.getBulkOperationSize() ||
-                    batchMap.size() == TestsHelper.getBulkOperationSize()) {
-                    if (cacheStore != null)
-                        doWork(batchList);
-                    else
-                        doWork(batchMap);
-
-                    batchMap.clear();
-                    batchList.clear();
-                }
-
-                if (cntr == endPosition)
-                    cntr = startPosition;
-                else
-                    cntr++;
-
-                key = TestsHelper.generateLoadTestsKey(cntr);
-                val = TestsHelper.generateLoadTestsValue(cntr);
-
-                if (batchMode()) {
-                    if (cacheStore != null)
-                        batchList.add(new CacheEntryImpl(key, val));
-                    else
-                        batchMap.put(key, val);
-                }
-
-                reportStatistics();
-            }
-        }
-        finally {
-            warmupFinishTime = warmupFinishTime != 0 ? warmupFinishTime : System.currentTimeMillis();
-            finishTime = System.currentTimeMillis();
-        }
-    }
-
-    /** */
-    private void doWork(CacheEntryImpl entry) {
-        try {
-            process(cacheStore, entry);
-            updateMetrics(1);
-        }
-        catch (Throwable e) {
-            log.error("Failed to perform single operation", e);
-            updateErrorMetrics(1);
-        }
-    }
-
-    /** */
-    private void doWork(Object key, Object val) {
-        try {
-            process(igniteCache, key, val);
-            updateMetrics(1);
-        }
-        catch (Throwable e) {
-            log.error("Failed to perform single operation", e);
-            updateErrorMetrics(1);
-        }
-    }
-
-    /** */
-    private void doWork(Collection<CacheEntryImpl> entries) {
-        try {
-            process(cacheStore, entries);
-            updateMetrics(entries.size());
-        }
-        catch (Throwable e) {
-            log.error("Failed to perform batch operation", e);
-            updateErrorMetrics(entries.size());
-        }
-    }
-
-    /** */
-    private void doWork(Map entries) {
-        try {
-            process(igniteCache, entries);
-            updateMetrics(entries.size());
-        }
-        catch (Throwable e) {
-            log.error("Failed to perform batch operation", e);
-            updateErrorMetrics(entries.size());
-        }
-    }
-
-    /** */
-    private long getWarmUpSpeed() {
-        if (warmupMsgProcessed == 0)
-            return 0;
-
-        long finish = warmupFinishTime != 0 ? warmupFinishTime : System.currentTimeMillis();
-        long duration = (finish - warmupStartTime - warmupSleepCnt * TestsHelper.getLoadTestsRequestsLatency()) / 1000;
-
-        return duration == 0 ? warmupMsgProcessed : warmupMsgProcessed / duration;
-    }
-
-    /** */
-    private void updateMetrics(int itemsProcessed) {
-        if (warmup)
-            warmupMsgProcessed += itemsProcessed;
-        else
-            msgProcessed += itemsProcessed;
-
-        if (TestsHelper.getLoadTestsRequestsLatency() > 0) {
-            try {
-                Thread.sleep(TestsHelper.getLoadTestsRequestsLatency());
-
-                if (warmup)
-                    warmupSleepCnt++;
-                else
-                    sleepCnt++;
-            }
-            catch (Throwable ignored) {
-            }
-        }
-    }
-
-    /**
-     * TODO IGNITE-1371 Comment absent.
-     *
-     * @param itemsFailed Number of failed items.
-     */
-    private void updateErrorMetrics(int itemsFailed) {
-        if (!warmup)
-            msgFailed += itemsFailed;
-    }
-
-    /** */
-    private void reportStatistics() {
-        // statistics should be reported only every 30 seconds
-        if (System.currentTimeMillis() - statReportedTime < 30000)
-            return;
-
-        statReportedTime = System.currentTimeMillis();
-
-        int completed = warmup ?
-                (int)(statReportedTime - warmupStartTime) * 100 / TestsHelper.getLoadTestsWarmupPeriod() :
-                (int)(statReportedTime - startTime) * 100 / TestsHelper.getLoadTestsExecutionTime();
-
-        if (completed > 100)
-            completed = 100;
-
-        if (warmup) {
-            log.info("Warm up messages processed " + warmupMsgProcessed + ", " +
-                "speed " + getWarmUpSpeed() + " msg/sec, " + completed + "% completed");
-        }
-        else {
-            log.info("Messages processed " + msgProcessed + ", " +
-                "speed " + getSpeed() + " msg/sec, " + completed + "% completed, " +
-                "errors " + msgFailed + " / " + String.format("%.2f", getErrorsPercent()).replace(",", ".") + "%");
-        }
-    }
-
-    /** */
-    private void reportTestCompletion() {
-        StringBuilder builder = new StringBuilder();
-
-        if (executionError != null)
-            builder.append("Test execution abnormally terminated. ");
-        else
-            builder.append("Test execution successfully completed. ");
-
-        builder.append("Statistics: ").append(SystemHelper.LINE_SEPARATOR);
-        builder.append("Start time: ")
-            .append(IgniteUtils.SHORT_DATE_FMT.format(Instant.ofEpochMilli(testStartTime)))
-            .append(SystemHelper.LINE_SEPARATOR);
-        builder.append("Finish time: ")
-            .append(IgniteUtils.SHORT_DATE_FMT.format(Instant.ofEpochMilli(finishTime)))
-            .append(SystemHelper.LINE_SEPARATOR);
-        builder.append("Duration: ").append((finishTime - testStartTime) / 1000).append(" sec")
-            .append(SystemHelper.LINE_SEPARATOR);
-
-        if (TestsHelper.getLoadTestsWarmupPeriod() > 0) {
-            builder.append("Warm up period: ").append(TestsHelper.getLoadTestsWarmupPeriod() / 1000)
-                .append(" sec").append(SystemHelper.LINE_SEPARATOR);
-            builder.append("Warm up processed messages: ").append(warmupMsgProcessed).append(SystemHelper.LINE_SEPARATOR);
-            builder.append("Warm up processing speed: ").append(getWarmUpSpeed())
-                .append(" msg/sec").append(SystemHelper.LINE_SEPARATOR);
-        }
-
-        builder.append("Processed messages: ").append(msgProcessed).append(SystemHelper.LINE_SEPARATOR);
-        builder.append("Processing speed: ").append(getSpeed()).append(" msg/sec").append(SystemHelper.LINE_SEPARATOR);
-        builder.append("Errors: ").append(msgFailed).append(" / ").
-                append(String.format("%.2f", getErrorsPercent()).replace(",", ".")).append("%");
-
-        if (executionError != null)
-            log.error(builder.toString(), executionError);
-        else
-            log.info(builder.toString());
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java
deleted file mode 100644
index 38f0db86674fd..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkReadWorker.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.cassandra;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.load.Worker;
-import org.apache.ignite.tests.utils.TestsHelper;
-
-/**
- * Cassandra direct load tests worker for bulk read operation CacheStore.loadAll
- */
-public class BulkReadWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "CassandraBulkReadLoadTest";
-
-    /** */
-    private List<Object> keys = new ArrayList<>(TestsHelper.getBulkOperationSize());
-
-    /** */
-    public BulkReadWorker(CacheStore cacheStore, long startPosition, long endPosition) {
-        super(cacheStore, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(CacheStore cacheStore, Collection<CacheEntryImpl> entries) {
-        keys.clear();
-
-        for (CacheEntryImpl entry : entries)
-            keys.add(entry.getKey());
-
-        cacheStore.loadAll(keys);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java
deleted file mode 100644
index c71728f767320..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/BulkWriteWorker.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.cassandra;
-
-import java.util.Collection;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Cassandra direct load tests worker for bulk write operation CacheStore.writeAll
- */
-public class BulkWriteWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "CassandraBulkWriteLoadTest";
-
-    /** */
-    public BulkWriteWorker(CacheStore cacheStore, long startPosition, long endPosition) {
-        super(cacheStore, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(CacheStore cacheStore, Collection<CacheEntryImpl> entries) {
-        cacheStore.writeAll(entries);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java
deleted file mode 100644
index 051b55fb49e95..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/ReadWorker.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.cassandra;
-
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Cassandra direct load tests worker for read operation CacheStore.load
- */
-public class ReadWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "CassandraReadLoadTest";
-
-    /** */
-    public ReadWorker(CacheStore cacheStore, long startPosition, long endPosition) {
-        super(cacheStore, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(CacheStore cacheStore, CacheEntryImpl entry) {
-        cacheStore.load(entry.getKey());
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java
deleted file mode 100644
index 2b10bcdcd1698..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/WriteWorker.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.cassandra;
-
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Cassandra direct load tests worker for write operation CacheStore.write
- */
-public class WriteWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "CassandraWriteLoadTest";
-
-    /** */
-    public WriteWorker(CacheStore cacheStore, long startPosition, long endPosition) {
-        super(cacheStore, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(CacheStore cacheStore, CacheEntryImpl entry) {
-        cacheStore.write(entry);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java
deleted file mode 100644
index 74204eed6e2a5..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/cassandra/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains load test worker implementations for the Cassandra cluster
- */
-
-package org.apache.ignite.tests.load.cassandra;
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java
deleted file mode 100644
index c20d0cef220bd..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkReadWorker.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.ignite;
-
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Ignite load tests worker for bulk read operation CacheStore.loadAll
- */
-public class BulkReadWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "IgniteBulkReadLoadTest";
-
-    /** */
-    public BulkReadWorker(Ignite ignite, long startPosition, long endPosition) {
-        super(ignite, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(IgniteCache cache, Map entries) {
-        cache.getAll(entries.keySet());
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java
deleted file mode 100644
index 1ce7be3622fc3..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/BulkWriteWorker.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.ignite;
-
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Ignite load tests worker for bulk write operation CacheStore.writeAll
- */
-public class BulkWriteWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "IgniteBulkWriteLoadTest";
-
-    /** */
-    public BulkWriteWorker(Ignite ignite, long startPosition, long endPosition) {
-        super(ignite, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return true;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(IgniteCache cache, Map entries) {
-        cache.putAll(entries);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java
deleted file mode 100644
index 35f7d3959c2a5..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/ReadWorker.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.ignite;
-
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Ignite load tests worker for read operation CacheStore.load
- */
-public class ReadWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "IgniteReadLoadTest";
-
-    /** */
-    public ReadWorker(Ignite ignite, long startPosition, long endPosition) {
-        super(ignite, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(IgniteCache cache, Object key, Object val) {
-        cache.get(key);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java
deleted file mode 100644
index bed709973597e..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/WriteWorker.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.load.ignite;
-
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.tests.load.Worker;
-
-/**
- * Ignite load tests worker for write operation CacheStore.write
- */
-public class WriteWorker extends Worker {
-    /** */
-    public static final String LOGGER_NAME = "IgniteWriteLoadTest";
-
-    /** */
-    public WriteWorker(Ignite ignite, long startPosition, long endPosition) {
-        super(ignite, startPosition, endPosition);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected String loggerName() {
-        return LOGGER_NAME;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean batchMode() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override protected void process(IgniteCache cache, Object key, Object val) {
-        cache.put(key, val);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java
deleted file mode 100644
index 2beab56c5ba7b..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/ignite/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains load test worker implementations for the Ignite-Cassandra cluster
- */
-
-package org.apache.ignite.tests.load.ignite;
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/package-info.java
deleted file mode 100644
index 890e3dffd7a10..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/load/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains load test classes
- */
-
-package org.apache.ignite.tests.load;
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/package-info.java
deleted file mode 100644
index 52a34daea09bd..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains unit tests
- */
-
-package org.apache.ignite.tests;
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java
deleted file mode 100644
index 2bec7071c76cb..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Person.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.pojos;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Date;
-import java.util.List;
-
-/**
- * Simple POJO that can be stored as a value in an Ignite cache
- */
-public class Person implements Externalizable {
-    /** */
-    private long personNum;
-
-    /** */
-    private String firstName;
-
-    /** */
-    private String lastName;
-
-    /** */
-    private String fullName;
-
-    /** */
-    private short age;
-
-    /** */
-    private boolean married;
-
-    /** */
-    private long height;
-
-    /** */
-    private float weight;
-
-    /** */
-    private Date birthDate;
-
-    /** */
-    private List<String> phones;
-
-    /** */
-    public Person() {
-    }
-
-    /** */
-    public Person(long personNum, String firstName, String lastName, short age, boolean married,
-        long height, float weight, Date birthDate, List<String> phones) {
-        this.personNum = personNum;
-        this.firstName = firstName;
-        this.lastName = lastName;
-        this.age = age;
-        this.married = married;
-        this.height = height;
-        this.weight = weight;
-        this.birthDate = birthDate;
-        this.phones = phones;
-    }
-
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeLong(personNum);
-        out.writeObject(firstName);
-        out.writeObject(lastName);
-        out.writeShort(age);
-        out.writeBoolean(married);
-        out.writeLong(height);
-        out.writeFloat(weight);
-        out.writeObject(birthDate);
-        out.writeObject(phones);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        personNum = in.readLong();
-        firstName = (String)in.readObject();
-        lastName = (String)in.readObject();
-        age = in.readShort();
-        married = in.readBoolean();
-        height = in.readLong();
-        weight = in.readFloat();
-        birthDate = (Date)in.readObject();
-        phones = (List<String>)in.readObject();
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("SimplifiableIfStatement")
-    @Override public boolean equals(Object obj) {
-        if (obj == null || !(obj instanceof Person))
-            return false;
-
-        Person person = (Person)obj;
-
-        if (personNum != person.personNum)
-            return false;
-
-        if ((firstName != null && !firstName.equals(person.firstName)) ||
-            (person.firstName != null && !person.firstName.equals(firstName)))
-            return false;
-
-        if ((lastName != null && !lastName.equals(person.lastName)) ||
-            (person.lastName != null && !person.lastName.equals(lastName)))
-            return false;
-
-        if ((birthDate != null && !birthDate.equals(person.birthDate)) ||
-            (person.birthDate != null && !person.birthDate.equals(birthDate)))
-            return false;
-
-        if ((phones != null && !phones.equals(person.phones)) ||
-            (person.phones != null && !person.phones.equals(phones)))
-            return false;
-
-        return age == person.age && married == person.married &&
-            height == person.height && weight == person.weight;
-    }
-
-    /** */
-    @SuppressWarnings("SimplifiableIfStatement")
-    public boolean equalsPrimitiveFields(Object obj) {
-        if (obj == null || !(obj instanceof Person))
-            return false;
-
-        Person person = (Person)obj;
-
-        if (personNum != person.personNum)
-            return false;
-
-        if ((firstName != null && !firstName.equals(person.firstName)) ||
-            (person.firstName != null && !person.firstName.equals(firstName)))
-            return false;
-
-        if ((lastName != null && !lastName.equals(person.lastName)) ||
-            (person.lastName != null && !person.lastName.equals(lastName)))
-            return false;
-
-        if ((birthDate != null && !birthDate.equals(person.birthDate)) ||
-            (person.birthDate != null && !person.birthDate.equals(birthDate)))
-            return false;
-
-        return age == person.age && married == person.married &&
-            height == person.height && weight == person.weight;
-    }
-
-    /** */
-    public void setPersonNumber(long personNum) {
-        this.personNum = personNum;
-    }
-
-    /** */
-    public long getPersonNumber() {
-        return personNum;
-    }
-
-    /** */
-    public void setFirstName(String name) {
-        firstName = name;
-        fullName = firstName + " " + lastName;
-    }
-
-    /** */
-    public String getFirstName() {
-        return firstName;
-    }
-
-    /** */
-    public void setLastName(String name) {
-        lastName = name;
-        fullName = firstName + " " + lastName;
-    }
-
-    /** */
-    public String getLastName() {
-        return lastName;
-    }
-
-    /** */
-    public String getFullName() {
-        return fullName;
-    }
-
-    /** */
-    public void setAge(short age) {
-        this.age = age;
-    }
-
-    /** */
-    public short getAge() {
-        return age;
-    }
-
-    /** */
-    public void setMarried(boolean married) {
-        this.married = married;
-    }
-
-    /** */
-    public boolean getMarried() {
-        return married;
-    }
-
-    /** */
-    public void setHeight(long height) {
-        this.height = height;
-    }
-
-    /** */
-    public long getHeight() {
-        return height;
-    }
-
-    /** */
-    public void setWeight(float weight) {
-        this.weight = weight;
-    }
-
-    /** */
-    public float getWeight() {
-        return weight;
-    }
-
-    /** */
-    public void setBirthDate(Date date) {
-        birthDate = date;
-    }
-
-    /** */
-    public Date getBirthDate() {
-        return birthDate;
-    }
-
-    /** */
-    public void setPhones(List<String> phones) {
-        this.phones = phones;
-    }
-
-    /** */
-    public List<String> getPhones() {
-        return phones;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/PersonId.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/PersonId.java
deleted file mode 100644
index 530e09b3da073..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/PersonId.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.pojos;
-
-import java.io.Serializable;
-
-/**
- * Simple POJO that can be stored as a key in an Ignite cache
- */
-public class PersonId implements Serializable {
-    /** */
-    private String companyCode;
-
-    /** */
-    private String departmentCode;
-
-    /** */
-    private long personNum;
-
-    /** */
-    public PersonId() {
-    }
-
-    /** */
-    public PersonId(String companyCode, String departmentCode, long personNum) {
-        this.companyCode = companyCode;
-        this.departmentCode = departmentCode;
-        this.personNum = personNum;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("SimplifiableIfStatement")
-    @Override public boolean equals(Object obj) {
-        if (obj == null || !(obj instanceof PersonId))
-            return false;
-
-        PersonId id = (PersonId)obj;
-
-        if ((companyCode != null && !companyCode.equals(id.companyCode)) ||
-            (id.companyCode != null && !id.companyCode.equals(companyCode)))
-            return false;
-
-        if ((departmentCode != null && !departmentCode.equals(id.departmentCode)) ||
-            (id.departmentCode != null && !id.departmentCode.equals(departmentCode)))
-            return false;
-
-        return personNum == id.personNum;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        String code = (companyCode == null ? "" : companyCode) +
-            (departmentCode == null ? "" : departmentCode) +
-                personNum;
-
-        return code.hashCode();
-    }
-
-    /** */
-    public void setCompanyCode(String code) {
-        companyCode = code;
-    }
-
-    /** */
-    public String getCompanyCode() {
-        return companyCode;
-    }
-
-    /** */
-    public void setDepartmentCode(String code) {
-        departmentCode = code;
-    }
-
-    /** */
-    public String getDepartmentCode() {
-        return departmentCode;
-    }
-
-    /** */
-    public void setPersonNumber(long personNum) {
-        this.personNum = personNum;
-    }
-
-    /** */
-    public long getPersonNumber() {
-        return personNum;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java
deleted file mode 100644
index acdb10c922d49..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/Product.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.pojos;
-
-/**
- * Simple POJO to store information about a product
- */
-public class Product {
-    /** */
-    private long id;
-
-    /** */
-    private String type;
-
-    /** */
-    private String title;
-
-    /** */
-    private String description;
-
-    /** */
-    private float price;
-
-    /** */
-    public Product() {
-    }
-
-    /** */
-    public Product(long id, String type, String title, String description, float price) {
-        this.id = id;
-        this.type = type;
-        this.title = title;
-        this.description = description;
-        this.price = price;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return ((Long)id).hashCode();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object obj) {
-        return obj instanceof Product && id == ((Product)obj).id;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return id + ", " + price + ", " + type + ", " + title + ", " + description;
-    }
-
-    /** */
-    public void setId(long id) {
-        this.id = id;
-    }
-
-    /** */
-    public long getId() {
-        return id;
-    }
-
-    /** */
-    public void setType(String type) {
-        this.type = type;
-    }
-
-    /** */
-    public String getType() {
-        return type;
-    }
-
-    /** */
-    public void setTitle(String title) {
-        this.title = title;
-    }
-
-    /** */
-    public String getTitle() {
-        return title;
-    }
-
-    /** */
-    public void setDescription(String description) {
-        this.description = description;
-    }
-
-    /** */
-    public String getDescription() {
-        return description;
-    }
-
-    /** */
-    public void setPrice(float price) {
-        this.price = price;
-    }
-
-    /** */
-    public float getPrice() {
-        return price;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java
deleted file mode 100644
index 0c7ba679cbd06..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/ProductOrder.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.pojos;
-
-import java.time.Instant;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatter;
-
-/**
- * Simple POJO to store information about a product order
- */
-public class ProductOrder {
-    /** */
-    private static final DateTimeFormatter FORMAT =
-        DateTimeFormatter.ofPattern("MM/dd/yyyy/S").withZone(ZoneId.systemDefault());
-
-    /** */
-    private static final DateTimeFormatter FULL_FORMAT =
-        DateTimeFormatter.ofPattern("MM/dd/yyyy HH:mm:ss:S").withZone(ZoneId.systemDefault());
-
-    /** */
-    private long id;
-
-    /** */
-    private long productId;
-
-    /** */
-    private Instant date;
-
-    /** */
-    private int amount;
-
-    /** */
-    private float price;
-
-    /** */
-    public ProductOrder() {
-    }
-
-    /** */
-    public ProductOrder(long id, Product product, Instant date, int amount) {
-        this(id, product.getId(), product.getPrice(), date, amount);
-    }
-
-    /** */
-    public ProductOrder(long id, long productId, float productPrice, Instant date, int amount) {
-        this.id = id;
-        this.productId = productId;
-        this.date = date;
-        this.amount = amount;
-        this.price = productPrice * amount;
-
-        // if user ordered more than 10 items provide 5% discount
-        if (amount > 10)
-            price *= 0.95F;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        return ((Long)id).hashCode();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object obj) {
-        return obj instanceof ProductOrder && id == ((ProductOrder)obj).id;
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return id + ", " + productId + ", " + FULL_FORMAT.format(date) + ", " + getDayMillisecond() + ", " + amount + ", " + price;
-    }
-
-    /** */
-    public void setId(long id) {
-        this.id = id;
-    }
-
-    /** */
-    public long getId() {
-        return id;
-    }
-
-    /** */
-    public void setProductId(long productId) {
-        this.productId = productId;
-    }
-
-    /** */
-    public long getProductId() {
-        return productId;
-    }
-
-    /** */
-    public void setDate(Instant date) {
-        this.date = date;
-    }
-
-    /** */
-    public Instant getDate() {
-        return date;
-    }
-
-    /** */
-    public void setAmount(int amount) {
-        this.amount = amount;
-    }
-
-    /** */
-    public int getAmount() {
-        return amount;
-    }
-
-    /** */
-    public void setPrice(float price) {
-        this.price = price;
-    }
-
-    /** */
-    public float getPrice() {
-        return price;
-    }
-
-    /** */
-    public String getDayMillisecond() {
-        return FORMAT.format(date);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePerson.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePerson.java
deleted file mode 100644
index dbafde942b96d..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePerson.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.pojos;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Date;
-import java.util.List;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-
-/**
- * Simple POJO without getters/setters that can be stored as a value in an Ignite cache
- */
-public class SimplePerson implements Externalizable {
-    /** */
-    @QuerySqlField(name = "person_num")
-    private long personNum;
-
-    /** */
-    @QuerySqlField(name = "first_name")
-    private String firstName;
-
-    /** */
-    @QuerySqlField(name = "last_name")
-    private String lastName;
-
-    /** */
-    @QuerySqlField(name = "age")
-    private short age;
-
-    /** */
-    @QuerySqlField(name = "married", index = true)
-    private boolean married;
-
-    /** */
-    @QuerySqlField(name = "height")
-    private long height;
-
-    /** */
-    @QuerySqlField(name = "weight")
-    private float weight;
-
-    /** */
-    @QuerySqlField(name = "birth_date")
-    private Date birthDate;
-
-    /** */
-    @QuerySqlField(name = "phones")
-    private List<String> phones;
-
-    /** */
-    public SimplePerson() {
-    }
-
-    /** */
-    public SimplePerson(Person person) {
-        this.personNum = person.getPersonNumber();
-        this.firstName = person.getFirstName();
-        this.lastName = person.getLastName();
-        this.age = person.getAge();
-        this.married = person.getMarried();
-        this.height = person.getHeight();
-        this.weight = person.getWeight();
-        this.birthDate = person.getBirthDate();
-        this.phones = person.getPhones();
-    }
-
-    /** */
-    public SimplePerson(long personNum, String firstName, String lastName, short age, boolean married,
-                        long height, float weight, Date birthDate, List<String> phones) {
-        this.personNum = personNum;
-        this.firstName = firstName;
-        this.lastName = lastName;
-        this.age = age;
-        this.married = married;
-        this.height = height;
-        this.weight = weight;
-        this.birthDate = birthDate;
-        this.phones = phones;
-    }
-
-
-    /** {@inheritDoc} */
-    @Override public void writeExternal(ObjectOutput out) throws IOException {
-        out.writeLong(personNum);
-        out.writeObject(firstName);
-        out.writeObject(lastName);
-        out.writeShort(age);
-        out.writeBoolean(married);
-        out.writeLong(height);
-        out.writeFloat(weight);
-        out.writeObject(birthDate);
-        out.writeObject(phones);
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        personNum = in.readLong();
-        firstName = (String)in.readObject();
-        lastName = (String)in.readObject();
-        age = in.readShort();
-        married = in.readBoolean();
-        height = in.readLong();
-        weight = in.readFloat();
-        birthDate = (Date)in.readObject();
-        phones = (List<String>)in.readObject();
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("SimplifiableIfStatement")
-    @Override public boolean equals(Object obj) {
-        if (obj == null || !(obj instanceof SimplePerson))
-            return false;
-
-        SimplePerson person = (SimplePerson)obj;
-
-        if (personNum != person.personNum)
-            return false;
-
-        if ((firstName != null && !firstName.equals(person.firstName)) ||
-            (person.firstName != null && !person.firstName.equals(firstName)))
-            return false;
-
-        if ((lastName != null && !lastName.equals(person.lastName)) ||
-            (person.lastName != null && !person.lastName.equals(lastName)))
-            return false;
-
-        if ((birthDate != null && !birthDate.equals(person.birthDate)) ||
-            (person.birthDate != null && !person.birthDate.equals(birthDate)))
-            return false;
-
-        if ((phones != null && !phones.equals(person.phones)) ||
-            (person.phones != null && !person.phones.equals(phones)))
-            return false;
-
-        return age == person.age && married == person.married &&
-            height == person.height && weight == person.weight;
-    }
-
-    /** */
-    @SuppressWarnings("SimplifiableIfStatement")
-    public boolean equalsPrimitiveFields(Object obj) {
-        if (obj == null || !(obj instanceof SimplePerson))
-            return false;
-
-        SimplePerson person = (SimplePerson)obj;
-
-        if (personNum != person.personNum)
-            return false;
-
-        if ((firstName != null && !firstName.equals(person.firstName)) ||
-            (person.firstName != null && !person.firstName.equals(firstName)))
-            return false;
-
-        if ((lastName != null && !lastName.equals(person.lastName)) ||
-            (person.lastName != null && !person.lastName.equals(lastName)))
-            return false;
-
-        if ((birthDate != null && !birthDate.equals(person.birthDate)) ||
-            (person.birthDate != null && !person.birthDate.equals(birthDate)))
-            return false;
-
-        return age == person.age && married == person.married &&
-            height == person.height && weight == person.weight;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePersonId.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePersonId.java
deleted file mode 100644
index 75e7c4eeb897b..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/SimplePersonId.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.pojos;
-
-import java.io.Serializable;
-import org.apache.ignite.cache.affinity.AffinityKeyMapped;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-
-/**
- * Simple POJO without getters/setters that can be stored as a key in an Ignite cache
- */
-public class SimplePersonId implements Serializable {
-    /** */
-    @AffinityKeyMapped
-    @QuerySqlField(name = "company_code")
-    public String companyCode;
-
-    /** */
-    @AffinityKeyMapped
-    @QuerySqlField(name = "department_code")
-    public String departmentCode;
-
-    /** */
-    @QuerySqlField(name = "person_num")
-    public long personNum;
-
-    /** */
-    public SimplePersonId() {
-    }
-
-    /** */
-    public SimplePersonId(PersonId personId) {
-        this.companyCode = personId.getCompanyCode();
-        this.departmentCode = personId.getDepartmentCode();
-        this.personNum = personId.getPersonNumber();
-    }
-
-    /** */
-    public SimplePersonId(String companyCode, String departmentCode, long personNum) {
-        this.companyCode = companyCode;
-        this.departmentCode = departmentCode;
-        this.personNum = personNum;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("SimplifiableIfStatement")
-    @Override public boolean equals(Object obj) {
-        if (obj == null || !(obj instanceof SimplePersonId))
-            return false;
-
-        SimplePersonId id = (SimplePersonId)obj;
-
-        if ((companyCode != null && !companyCode.equals(id.companyCode)) ||
-            (id.companyCode != null && !id.companyCode.equals(companyCode)))
-            return false;
-
-        if ((departmentCode != null && !departmentCode.equals(id.departmentCode)) ||
-            (id.departmentCode != null && !id.departmentCode.equals(departmentCode)))
-            return false;
-
-        return personNum == id.personNum;
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        String code = (companyCode == null ? "" : companyCode) +
-            (departmentCode == null ? "" : departmentCode) +
-                personNum;
-
-        return code.hashCode();
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/package-info.java
deleted file mode 100644
index daa86ad31d850..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/pojos/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains sample POJO objects used in unit tests
- */
-
-package org.apache.ignite.tests.pojos;
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
deleted file mode 100644
index bf996cdd258a3..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CacheStoreHelper.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import java.lang.reflect.Field;
-import org.apache.ignite.cache.store.CacheStore;
-import org.apache.ignite.cache.store.CacheStoreSession;
-import org.apache.ignite.cache.store.cassandra.CassandraCacheStore;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
-import org.apache.ignite.testframework.junits.logger.GridTestLog4jLogger;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.springframework.core.io.Resource;
-
-/**
- * Helper class used by unit tests to get an appropriate instance of {@link CacheStore}
- */
-public class CacheStoreHelper {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger(CacheStoreHelper.class.getName());
-
-    /** */
-    public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn) {
-        return createCacheStore(cacheName, persistenceSettings, conn, null, LOGGER);
-    }
-
-    /** */
-    public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn,
-        CacheStoreSession session) {
-        return createCacheStore(cacheName, persistenceSettings, conn, session, LOGGER);
-    }
-
-    /** */
-    public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn,
-        Logger log) {
-        return createCacheStore(cacheName, persistenceSettings, conn, null, log);
-    }
-
-    /** */
-    public static CacheStore createCacheStore(String cacheName, Resource persistenceSettings, DataSource conn,
-        CacheStoreSession session, Logger log) {
-        CassandraCacheStore<Integer, Integer> cacheStore =
-            new CassandraCacheStore<>(conn, new KeyValuePersistenceSettings(persistenceSettings),
-                Runtime.getRuntime().availableProcessors());
-
-        try {
-            Field sesField = CassandraCacheStore.class.getDeclaredField("storeSes");
-            Field logField = CassandraCacheStore.class.getDeclaredField("log");
-
-            sesField.setAccessible(true);
-            logField.setAccessible(true);
-
-            sesField.set(cacheStore, session != null ? session : new TestCacheSession(cacheName));
-            logField.set(cacheStore, new GridTestLog4jLogger(log));
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to initialize test Ignite cache store", e);
-        }
-
-        return cacheStore;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
deleted file mode 100644
index e7047f315bb45..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraAdminCredentials.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
-
-/**
- * Implementation of {@link Credentials} providing admin user/password to establish Cassandra session.
- */
-public class CassandraAdminCredentials implements Credentials {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public String getUser() {
-        return CassandraHelper.getAdminUser();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getPassword() {
-        return CassandraHelper.getAdminPassword();
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java
deleted file mode 100644
index 559294a99068f..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraHelper.java
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import java.lang.reflect.Field;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.ResourceBundle;
-import java.util.concurrent.atomic.AtomicInteger;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.Statement;
-import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
-import org.apache.ignite.cache.store.cassandra.session.pool.SessionPool;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lifecycle.LifecycleEventType;
-import org.apache.ignite.testframework.junits.logger.GridTestLog4jLogger;
-import org.apache.logging.log4j.Logger;
-import org.springframework.context.ApplicationContext;
-import org.springframework.context.support.ClassPathXmlApplicationContext;
-
-/**
- * Helper class providing a set of utility methods to work with Cassandra
- */
-public class CassandraHelper {
-    /** */
-    private static final ResourceBundle CREDENTIALS = ResourceBundle.getBundle("org/apache/ignite/tests/cassandra/credentials");
-
-    /** */
-    private static final ResourceBundle CONNECTION = ResourceBundle.getBundle("org/apache/ignite/tests/cassandra/connection");
-
-    /** */
-    private static final ResourceBundle KEYSPACES = ResourceBundle.getBundle("org/apache/ignite/tests/cassandra/keyspaces");
-
-    /** */
-    private static final String EMBEDDED_CASSANDRA_YAML = "org/apache/ignite/tests/cassandra/embedded-cassandra.yaml";
-
-    /** */
-    private static final ApplicationContext connectionContext =
-        new ClassPathXmlApplicationContext("org/apache/ignite/tests/cassandra/connection-settings.xml");
-
-    /** */
-    private static DataSource adminDataSrc;
-
-    /** */
-    private static DataSource regularDataSrc;
-
-    /** */
-    private static Cluster adminCluster;
-
-    /** */
-    private static Cluster regularCluster;
-
-    /** */
-    private static Session adminSes;
-
-    /** */
-    private static Session regularSes;
-
-    /** */
-    private static CassandraLifeCycleBean embeddedCassandraBean;
-
-    /** */
-    public static String getAdminUser() {
-        return CREDENTIALS.getString("admin.user");
-    }
-
-    /** */
-    public static String getAdminPassword() {
-        return CREDENTIALS.getString("admin.password");
-    }
-
-    /** */
-    public static String getRegularUser() {
-        return CREDENTIALS.getString("regular.user");
-    }
-
-    /** */
-    public static String getRegularPassword() {
-        return CREDENTIALS.getString("regular.password");
-    }
-
-    /** */
-    public static String[] getTestKeyspaces() {
-        return KEYSPACES.getString("keyspaces").split(",");
-    }
-
-    /** */
-    private static AtomicInteger refCounter = new AtomicInteger(0);
-
-    /** */
-    public static String[] getContactPointsArray() {
-        String[] points = CONNECTION.getString("contact.points").split(",");
-
-        if (points.length == 0)
-            throw new RuntimeException("No Cassandra contact points specified");
-
-        for (int i = 0; i < points.length; i++)
-            points[i] = points[i].trim();
-
-        return points;
-    }
-
-    /** */
-    public static List<InetAddress> getContactPoints() {
-        String[] points = getContactPointsArray();
-
-        List<InetAddress> contactPoints = new LinkedList<>();
-
-        for (String point : points) {
-            if (point.contains(":"))
-                continue;
-
-            try {
-                contactPoints.add(InetAddress.getByName(point));
-            }
-            catch (Throwable e) {
-                throw new IllegalArgumentException("Incorrect contact point '" + point +
-                    "' specified for Cassandra cache storage", e);
-            }
-        }
-
-        return contactPoints;
-    }
-
-    /** */
-    public static List<InetSocketAddress> getContactPointsWithPorts() {
-        String[] points = getContactPointsArray();
-
-        List<InetSocketAddress> contactPoints = new LinkedList<>();
-
-        for (String point : points) {
-            if (!point.contains(":"))
-                continue;
-
-            String[] chunks = point.split(":");
-
-            try {
-                contactPoints.add(InetSocketAddress.createUnresolved(chunks[0].trim(), Integer.parseInt(chunks[1].trim())));
-            }
-            catch (Throwable e) {
-                throw new IllegalArgumentException("Incorrect contact point '" + point +
-                    "' specified for Cassandra cache storage", e);
-            }
-        }
-
-        return contactPoints;
-    }
-
-    /**
-     * Checks if embedded Cassandra should be used for unit tests
-     * @return true if embedded Cassandra should be used
-     */
-    public static boolean useEmbeddedCassandra() {
-        String[] contactPoints = getContactPointsArray();
-
-        return contactPoints != null && contactPoints.length == 1 && contactPoints[0].trim().startsWith("127.0.0.1");
-    }
-
-    /** */
-    public static void dropTestKeyspaces() {
-        String[] keyspaces = getTestKeyspaces();
-
-        for (String keyspace : keyspaces) {
-            try {
-                executeWithAdminCredentials("DROP KEYSPACE IF EXISTS " + keyspace + ";");
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to drop keyspace: " + keyspace, e);
-            }
-        }
-    }
-
-    /** */
-    public static ResultSet executeWithAdminCredentials(String statement, Object... args) {
-        if (args == null || args.length == 0)
-            return adminSession().execute(statement);
-
-        PreparedStatement ps = adminSession().prepare(statement);
-        return adminSession().execute(ps.bind(args));
-    }
-
-    /** */
-    public static ResultSet executeWithRegularCredentials(String statement, Object... args) {
-        if (args == null || args.length == 0)
-            return regularSession().execute(statement);
-
-        PreparedStatement ps = regularSession().prepare(statement);
-        return regularSession().execute(ps.bind(args));
-    }
-
-    /** */
-    public static ResultSet executeWithAdminCredentials(Statement statement) {
-        return adminSession().execute(statement);
-    }
-
-    /** */
-    public static ResultSet executeWithRegularCredentials(Statement statement) {
-        return regularSession().execute(statement);
-    }
-
-    /** */
-    public static synchronized DataSource getAdminDataSrc() {
-        if (adminDataSrc != null)
-            return adminDataSrc;
-
-        return adminDataSrc = (DataSource)connectionContext.getBean("cassandraAdminDataSource");
-    }
-
-    /** */
-    public static synchronized DataSource getRegularDataSrc() {
-        if (regularDataSrc != null)
-            return regularDataSrc;
-
-        return regularDataSrc = (DataSource)connectionContext.getBean("cassandraRegularDataSource");
-    }
-
-    /** */
-    public static void testAdminConnection() {
-        try {
-            adminSession();
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to check admin connection to Cassandra", e);
-        }
-    }
-
-    /** */
-    public static void testRegularConnection() {
-        try {
-            regularSession();
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to check regular connection to Cassandra", e);
-        }
-    }
-
-    /** */
-    public static synchronized void releaseCassandraResources() {
-        try {
-            if (adminSes != null && !adminSes.isClosed())
-                U.closeQuiet(adminSes);
-        }
-        finally {
-            adminSes = null;
-        }
-
-        try {
-            if (adminCluster != null && !adminCluster.isClosed())
-                U.closeQuiet(adminCluster);
-        }
-        finally {
-            adminCluster = null;
-        }
-
-        try {
-            if (regularSes != null && !regularSes.isClosed())
-                U.closeQuiet(regularSes);
-        }
-        finally {
-            regularSes = null;
-        }
-
-        try {
-            if (regularCluster != null && !regularCluster.isClosed())
-                U.closeQuiet(regularCluster);
-        }
-        finally {
-            regularCluster = null;
-        }
-
-        SessionPool.release();
-    }
-
-    /** */
-    private static synchronized Session adminSession() {
-        if (adminSes != null)
-            return adminSes;
-
-        try {
-            Cluster.Builder builder = Cluster.builder();
-            builder = builder.withCredentials(getAdminUser(), getAdminPassword());
-            builder.addContactPoints(getContactPoints());
-            builder.addContactPointsWithPorts(getContactPointsWithPorts());
-
-            adminCluster = builder.build();
-            return adminSes = adminCluster.connect();
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to create admin session to Cassandra database", e);
-        }
-    }
-
-    /** */
-    private static synchronized Session regularSession() {
-        if (regularSes != null)
-            return regularSes;
-
-        try {
-            Cluster.Builder builder = Cluster.builder();
-            builder = builder.withCredentials(getRegularUser(), getRegularPassword());
-            builder.addContactPoints(getContactPoints());
-            builder.addContactPointsWithPorts(getContactPointsWithPorts());
-
-            regularCluster = builder.build();
-            return regularSes = regularCluster.connect();
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to create regular session to Cassandra database", e);
-        }
-    }
-
-    /**
-     * Note that the cassandra.storagedir system property is expected to be set.
-     */
-    public static void startEmbeddedCassandra(Logger log) {
-        if (refCounter.getAndIncrement() > 0)
-            return;
-
-        ClassLoader clsLdr = CassandraHelper.class.getClassLoader();
-        URL url = clsLdr.getResource(EMBEDDED_CASSANDRA_YAML);
-
-        embeddedCassandraBean = new CassandraLifeCycleBean();
-        embeddedCassandraBean.setCassandraConfigFile(url.getFile());
-
-        try {
-            Field logField = CassandraLifeCycleBean.class.getDeclaredField("log");
-            logField.setAccessible(true);
-            logField.set(embeddedCassandraBean, new GridTestLog4jLogger(log));
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to initialize logger for CassandraLifeCycleBean", e);
-        }
-
-        embeddedCassandraBean.onLifecycleEvent(LifecycleEventType.BEFORE_NODE_START);
-    }
-
-    /** */
-    public static void stopEmbeddedCassandra() {
-        if (refCounter.decrementAndGet() > 0)
-            return;
-
-        if (embeddedCassandraBean != null)
-            embeddedCassandraBean.onLifecycleEvent(LifecycleEventType.BEFORE_NODE_STOP);
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java
deleted file mode 100644
index 6ddc7544b0593..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraLifeCycleBean.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import org.apache.cassandra.service.CassandraDaemon;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.lifecycle.LifecycleBean;
-import org.apache.ignite.lifecycle.LifecycleEventType;
-import org.apache.ignite.resources.LoggerResource;
-
-/**
- * Implementation of {@link LifecycleBean} to start embedded Cassandra instance on Ignite cluster startup
- */
-public class CassandraLifeCycleBean implements LifecycleBean {
-    /** System property specifying Cassandra jmx port */
-    private static final String CASSANDRA_JMX_PORT_PROP = "cassandra.jmx.local.port";
-
-    /** System property specifying Cassandra YAML config file */
-    private static final String CASSANDRA_CONFIG_PROP = "cassandra.config";
-
-    /** Prefix for file path syntax */
-    private static final String FILE_PREFIX = "file:///";
-
-    /** Auto-injected logger instance. */
-    @LoggerResource
-    private IgniteLogger log;
-
-    /** Instance of embedded Cassandra database */
-    private CassandraDaemon embeddedCassandraDaemon;
-
-    /** JMX port for embedded Cassandra instance */
-    private String jmxPort;
-
-    /** YAML config file for embedded Cassandra */
-    private String cassandraCfgFile;
-
-    /**
-     * Returns JMX port for embedded Cassandra
-     * @return JMX port
-     */
-    public String getJmxPort() {
-        return jmxPort;
-    }
-
-    /**
-     * Setter for embedded Cassandra JMX port
-     * @param jmxPort embedded Cassandra JMX port
-     */
-    public void setJmxPort(String jmxPort) {
-        this.jmxPort = jmxPort;
-    }
-
-    /**
-     * Returns embedded Cassandra YAML config file
-     * @return YAML config file
-     */
-    public String getCassandraConfigFile() {
-        return cassandraCfgFile;
-    }
-
-    /**
-     * Setter for embedded Cassandra YAML config file
-     * @param cassandraCfgFile YAML config file
-     */
-    public void setCassandraConfigFile(String cassandraCfgFile) {
-        this.cassandraCfgFile = cassandraCfgFile;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void onLifecycleEvent(LifecycleEventType evt) {
-        if (evt == LifecycleEventType.BEFORE_NODE_START)
-            startEmbeddedCassandra();
-        else if (evt == LifecycleEventType.BEFORE_NODE_STOP)
-            stopEmbeddedCassandra();
-    }
-
-    /**
-     * Starts embedded Cassandra instance
-     */
-    private void startEmbeddedCassandra() {
-        if (log != null) {
-            log.info("-------------------------------");
-            log.info("| Starting embedded Cassandra |");
-            log.info("-------------------------------");
-        }
-
-        try {
-            if (jmxPort != null)
-                System.setProperty(CASSANDRA_JMX_PORT_PROP, jmxPort);
-
-            if (cassandraCfgFile != null)
-                System.setProperty(CASSANDRA_CONFIG_PROP, FILE_PREFIX + cassandraCfgFile);
-
-            embeddedCassandraDaemon = new CassandraDaemon(true);
-            embeddedCassandraDaemon.applyConfig();
-            embeddedCassandraDaemon.init(null);
-            embeddedCassandraDaemon.start();
-        }
-        catch (Exception e) {
-            throw new RuntimeException("Failed to start embedded Cassandra", e);
-        }
-
-        if (log != null) {
-            log.info("------------------------------");
-            log.info("| Embedded Cassandra started |");
-            log.info("------------------------------");
-        }
-    }
-
-    /**
-     * Stops embedded Cassandra instance
-     */
-    private void stopEmbeddedCassandra() {
-        if (log != null) {
-            log.info("-------------------------------");
-            log.info("| Stopping embedded Cassandra |");
-            log.info("-------------------------------");
-        }
-
-        if (embeddedCassandraDaemon != null) {
-            try {
-                embeddedCassandraDaemon.deactivate();
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to stop embedded Cassandra", e);
-            }
-        }
-
-        if (log != null) {
-            log.info("------------------------------");
-            log.info("| Embedded Cassandra stopped |");
-            log.info("------------------------------");
-        }
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
deleted file mode 100644
index 7546c9bb5d3f0..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/CassandraRegularCredentials.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import org.apache.ignite.cache.store.cassandra.datasource.Credentials;
-
-/**
- * Implementation of {@link Credentials} providing a regular user/password pair to establish a Cassandra session.
- */
-public class CassandraRegularCredentials implements Credentials {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** {@inheritDoc} */
-    @Override public String getUser() {
-        return CassandraHelper.getRegularUser();
-    }
-
-    /** {@inheritDoc} */
-    @Override public String getPassword() {
-        return CassandraHelper.getRegularPassword();
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
deleted file mode 100644
index c4272bdc577c3..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestCacheSession.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import java.util.Map;
-import org.apache.ignite.cache.store.CacheStoreSession;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.transactions.Transaction;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Test implementation of {@link CacheStoreSession} for unit test purposes.
- */
-public class TestCacheSession implements CacheStoreSession {
-    /** */
-    private String cacheName;
-
-    /** */
-    private Transaction tx;
-
-    /** */
-    private Map<Object, Object> props = U.newHashMap(1);
-
-    /** */
-    private Object attach;
-
-    /** */
-    public TestCacheSession(String cacheName) {
-        this.cacheName = cacheName;
-    }
-
-    /** */
-    public TestCacheSession(String cacheName, Transaction tx, Map<Object, Object> props) {
-        this.cacheName = cacheName;
-        this.tx = tx;
-        this.props = props;
-    }
-
-    /** */
-    public void newSession(@Nullable Transaction tx) {
-        this.tx = tx;
-        props = null;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public Transaction transaction() {
-        return tx;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isWithinTransaction() {
-        return transaction() != null;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public Object attach(@Nullable Object attach) {
-        Object prev = this.attach;
-        this.attach = attach;
-        return prev;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Nullable @Override public <T> T attachment() {
-        return (T)attach;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("unchecked")
-    @Override public <K, V> Map<K, V> properties() {
-        return (Map<K, V>)props;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public String cacheName() {
-        return cacheName;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java
deleted file mode 100644
index 6681dd6a224d0..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import java.util.UUID;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.lang.IgniteAsyncSupport;
-import org.apache.ignite.lang.IgniteFuture;
-import org.apache.ignite.lang.IgniteUuid;
-import org.apache.ignite.transactions.Transaction;
-import org.apache.ignite.transactions.TransactionConcurrency;
-import org.apache.ignite.transactions.TransactionIsolation;
-import org.apache.ignite.transactions.TransactionState;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Dummy transaction for test purposes.
- */
-public class TestTransaction implements Transaction {
-    /** */
-    private final IgniteUuid xid = IgniteUuid.randomUuid();
-
-    /** {@inheritDoc} */
-    @Nullable @Override public IgniteUuid xid() {
-        return xid;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public UUID nodeId() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long threadId() {
-        return 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long startTime() {
-        return 0;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public TransactionIsolation isolation() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public TransactionConcurrency concurrency() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean implicit() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isInvalidate() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public TransactionState state() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long timeout() {
-        return 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public long timeout(long timeout) {
-        return 0;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean setRollbackOnly() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isRollbackOnly() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void commit() {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteFuture<Void> commitAsync() throws IgniteException {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void close() {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteAsyncSupport withAsync() {
-        throw new UnsupportedOperationException();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isAsync() {
-        return false;
-    }
-
-    /** {@inheritDoc} */
-    @Override public <R> IgniteFuture<R> future() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void rollback() {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Override public IgniteFuture<Void> rollbackAsync() throws IgniteException {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void suspend() throws IgniteException {
-        // No-op.
-    }
-
-    /** {@inheritDoc} */
-    @Nullable @Override public String label() {
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public void resume() throws IgniteException {
-        // No-op.
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
deleted file mode 100644
index 67c00f8aa3373..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestsHelper.java
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tests.utils;
-
-import java.time.Instant;
-import java.time.LocalDate;
-import java.time.ZoneOffset;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.ResourceBundle;
-import java.util.Set;
-import org.apache.ignite.cache.store.cassandra.common.SystemHelper;
-import org.apache.ignite.internal.processors.cache.CacheEntryImpl;
-import org.apache.ignite.tests.load.Generator;
-import org.apache.ignite.tests.pojos.Person;
-import org.apache.ignite.tests.pojos.PersonId;
-import org.apache.ignite.tests.pojos.Product;
-import org.apache.ignite.tests.pojos.ProductOrder;
-import org.apache.ignite.tests.pojos.SimplePerson;
-import org.apache.ignite.tests.pojos.SimplePersonId;
-import org.springframework.core.io.ClassPathResource;
-
-/**
- * Helper class for all tests
- */
-public class TestsHelper {
-    /** */
-    private static final String LETTERS_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-
-    /** */
-    private static final String NUMBERS_ALPHABET = "0123456789";
-
-    /** */
-    private static final Random RANDOM = new Random(System.currentTimeMillis());
-
-    /** */
-    private static final ResourceBundle TESTS_SETTINGS = ResourceBundle.getBundle("tests");
-
-    /** */
-    private static final int BULK_OPERATION_SIZE = parseTestSettings("bulk.operation.size");
-
-    /** */
-    private static final String LOAD_TESTS_CACHE_NAME = TESTS_SETTINGS.getString("load.tests.cache.name");
-
-    /** */
-    private static final int LOAD_TESTS_THREADS_COUNT = parseTestSettings("load.tests.threads.count");
-
-    /** */
-    private static final int LOAD_TESTS_WARMUP_PERIOD = parseTestSettings("load.tests.warmup.period");
-
-    /** */
-    private static final int LOAD_TESTS_EXECUTION_TIME = parseTestSettings("load.tests.execution.time");
-
-    /** */
-    private static final int LOAD_TESTS_REQUESTS_LATENCY = parseTestSettings("load.tests.requests.latency");
-
-    /** */
-    private static final int TRANSACTION_PRODUCTS_COUNT = parseTestSettings("transaction.products.count");
-
-    /** */
-    private static final int TRANSACTION_ORDERS_COUNT = parseTestSettings("transaction.orders.count");
-
-    /** */
-    private static final int ORDERS_YEAR;
-
-    /** */
-    private static final int ORDERS_MONTH;
-
-    /** */
-    private static final int ORDERS_DAY;
-
-    /** */
-    private static final String LOAD_TESTS_PERSISTENCE_SETTINGS = TESTS_SETTINGS.getString("load.tests.persistence.settings");
-
-    /** */
-    private static final String LOAD_TESTS_IGNITE_CONFIG = TESTS_SETTINGS.getString("load.tests.ignite.config");
-
-    /** */
-    private static final Generator LOAD_TESTS_KEY_GENERATOR;
-
-    /** */
-    private static final Generator LOAD_TESTS_VALUE_GENERATOR;
-
-    /** */
-    private static final String HOST_PREFIX;
-
-    static {
-        try {
-            LOAD_TESTS_KEY_GENERATOR = (Generator)Class.forName(TESTS_SETTINGS.getString("load.tests.key.generator")).newInstance();
-            LOAD_TESTS_VALUE_GENERATOR = (Generator)Class.forName(TESTS_SETTINGS.getString("load.tests.value.generator")).newInstance();
-
-            String[] parts = SystemHelper.HOST_IP.split("\\.");
-
-            String prefix = parts[3];
-            prefix = prefix.length() > 2 ? prefix.substring(prefix.length() - 2) : prefix;
-
-            HOST_PREFIX = prefix;
-
-            LocalDate date = LocalDate.now();
-
-            String year = TESTS_SETTINGS.getString("orders.year");
-            ORDERS_YEAR = !year.trim().isEmpty() ? Integer.parseInt(year) : date.getYear();
-
-            String month = TESTS_SETTINGS.getString("orders.month");
-            ORDERS_MONTH = !month.trim().isEmpty() ? (Integer.parseInt(month) + 1) : date.getMonthValue();
-
-            String day = TESTS_SETTINGS.getString("orders.day");
-            ORDERS_DAY = !day.trim().isEmpty() ? Integer.parseInt(day) : date.getDayOfMonth();
-        }
-        catch (Throwable e) {
-            throw new RuntimeException("Failed to initialize TestsHelper", e);
-        }
-    }
-
-    /** */
-    private static int parseTestSettings(String name) {
-        return Integer.parseInt(TESTS_SETTINGS.getString(name));
-    }
-
-    /** */
-    public static int getLoadTestsThreadsCount() {
-        return LOAD_TESTS_THREADS_COUNT;
-    }
-
-    /** */
-    public static int getLoadTestsWarmupPeriod() {
-        return LOAD_TESTS_WARMUP_PERIOD;
-    }
-
-    /** */
-    public static int getLoadTestsExecutionTime() {
-        return LOAD_TESTS_EXECUTION_TIME;
-    }
-
-    /** */
-    public static int getLoadTestsRequestsLatency() {
-        return LOAD_TESTS_REQUESTS_LATENCY;
-    }
-
-    /** */
-    public static ClassPathResource getLoadTestsPersistenceSettings() {
-        return new ClassPathResource(LOAD_TESTS_PERSISTENCE_SETTINGS);
-    }
-
-    /** */
-    public static String getLoadTestsIgniteConfig() {
-        return LOAD_TESTS_IGNITE_CONFIG;
-    }
-
-    /** */
-    public static int getBulkOperationSize() {
-        return BULK_OPERATION_SIZE;
-    }
-
-    /** */
-    public static String getLoadTestsCacheName() {
-        return LOAD_TESTS_CACHE_NAME;
-    }
-
-    /** */
-    public static Object generateLoadTestsKey(long i) {
-        return LOAD_TESTS_KEY_GENERATOR.generate(i);
-    }
-
-    /** */
-    public static Object generateLoadTestsValue(long i) {
-        return LOAD_TESTS_VALUE_GENERATOR.generate(i);
-    }
-
-    /** */
-    @SuppressWarnings("unchecked")
-    public static CacheEntryImpl generateLoadTestsEntry(long i) {
-        return new CacheEntryImpl(TestsHelper.generateLoadTestsKey(i), TestsHelper.generateLoadTestsValue(i));
-    }
-
-    /** */
-    public static <K, V> Collection<K> getKeys(Collection<CacheEntryImpl<K, V>> entries) {
-        List<K> list = new LinkedList<>();
-
-        for (CacheEntryImpl<K, ?> entry : entries)
-            list.add(entry.getKey());
-
-        return list;
-    }
-
-    /** */
-    public static Map<Long, Long> generateLongsMap() {
-        return generateLongsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<Long, Long> generateLongsMap(int cnt) {
-        Map<Long, Long> map = new HashMap<>();
-
-        for (long i = 0; i < cnt; i++)
-            map.put(i, i + 123);
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, Long>> generateLongsEntries() {
-        return generateLongsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, Long>> generateLongsEntries(int cnt) {
-        Collection<CacheEntryImpl<Long, Long>> entries = new LinkedList<>();
-
-        for (long i = 0; i < cnt; i++)
-            entries.add(new CacheEntryImpl<>(i, i + 123));
-
-        return entries;
-    }
-
-    /** */
-    public static Map<String, String> generateStringsMap() {
-        return generateStringsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<String, String> generateStringsMap(int cnt) {
-        Map<String, String> map = new HashMap<>();
-
-        for (int i = 0; i < cnt; i++)
-            map.put(Integer.toString(i), randomString(5));
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<String, String>> generateStringsEntries() {
-        return generateStringsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<String, String>> generateStringsEntries(int cnt) {
-        Collection<CacheEntryImpl<String, String>> entries = new LinkedList<>();
-
-        for (int i = 0; i < cnt; i++)
-            entries.add(new CacheEntryImpl<>(Integer.toString(i), randomString(5)));
-
-        return entries;
-    }
-
-    /** */
-    public static Map<Long, Person> generateLongsPersonsMap() {
-        Map<Long, Person> map = new HashMap<>();
-
-        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
-            map.put(i, generateRandomPerson(i));
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, Person>> generateLongsPersonsEntries() {
-        Collection<CacheEntryImpl<Long, Person>> entries = new LinkedList<>();
-
-        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
-            entries.add(new CacheEntryImpl<>(i, generateRandomPerson(i)));
-
-        return entries;
-    }
-
-    /** */
-    public static Map<SimplePersonId, SimplePerson> generateSimplePersonIdsPersonsMap() {
-        return generateSimplePersonIdsPersonsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<SimplePersonId, SimplePerson> generateSimplePersonIdsPersonsMap(int cnt) {
-        Map<SimplePersonId, SimplePerson> map = new HashMap<>();
-
-        for (int i = 0; i < cnt; i++) {
-            PersonId id = generateRandomPersonId();
-
-            map.put(new SimplePersonId(id), new SimplePerson(generateRandomPerson(id.getPersonNumber())));
-        }
-
-        return map;
-    }
-
-    /** */
-    public static Map<PersonId, Person> generatePersonIdsPersonsMap() {
-        return generatePersonIdsPersonsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<PersonId, Person> generatePersonIdsPersonsMap(int cnt) {
-        Map<PersonId, Person> map = new HashMap<>();
-
-        for (int i = 0; i < cnt; i++) {
-            PersonId id = generateRandomPersonId();
-
-            map.put(id, generateRandomPerson(id.getPersonNumber()));
-        }
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<SimplePersonId, SimplePerson>> generateSimplePersonIdsPersonsEntries() {
-        return generateSimplePersonIdsPersonsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<SimplePersonId, SimplePerson>> generateSimplePersonIdsPersonsEntries(int cnt) {
-        Collection<CacheEntryImpl<SimplePersonId, SimplePerson>> entries = new LinkedList<>();
-
-        for (int i = 0; i < cnt; i++) {
-            PersonId id = generateRandomPersonId();
-
-            entries.add(new CacheEntryImpl<>(new SimplePersonId(id), new SimplePerson(generateRandomPerson(id.getPersonNumber()))));
-        }
-
-        return entries;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<PersonId, Person>> generatePersonIdsPersonsEntries() {
-        return generatePersonIdsPersonsEntries(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<PersonId, Person>> generatePersonIdsPersonsEntries(int cnt) {
-        Collection<CacheEntryImpl<PersonId, Person>> entries = new LinkedList<>();
-
-        for (int i = 0; i < cnt; i++) {
-            PersonId id = generateRandomPersonId();
-
-            entries.add(new CacheEntryImpl<>(id, generateRandomPerson(id.getPersonNumber())));
-        }
-
-        return entries;
-    }
-
-    /** */
-    public static List<CacheEntryImpl<Long, Product>> generateProductEntries() {
-        List<CacheEntryImpl<Long, Product>> entries = new LinkedList<>();
-
-        for (long i = 0; i < BULK_OPERATION_SIZE; i++)
-            entries.add(new CacheEntryImpl<>(i, generateRandomProduct(i)));
-
-        return entries;
-    }
-
-    /** */
-    public static Collection<Long> getProductIds(Collection<CacheEntryImpl<Long, Product>> entries) {
-        List<Long> ids = new LinkedList<>();
-
-        for (CacheEntryImpl<Long, Product> entry : entries)
-            ids.add(entry.getKey());
-
-        return ids;
-    }
-
-    /** */
-    public static Map<Long, Product> generateProductsMap() {
-        return generateProductsMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<Long, Product> generateProductsMap(int count) {
-        Map<Long, Product> map = new HashMap<>();
-
-        for (long i = 0; i < count; i++)
-            map.put(i, generateRandomProduct(i));
-
-        return map;
-    }
-
-    /** */
-    public static Collection<CacheEntryImpl<Long, ProductOrder>> generateOrderEntries() {
-        Collection<CacheEntryImpl<Long, ProductOrder>> entries = new LinkedList<>();
-
-        for (long i = 0; i < BULK_OPERATION_SIZE; i++) {
-            ProductOrder order = generateRandomOrder(i);
-            entries.add(new CacheEntryImpl<>(order.getId(), order));
-        }
-
-        return entries;
-    }
-
-    /** */
-    public static Map<Long, ProductOrder> generateOrdersMap() {
-        return generateOrdersMap(BULK_OPERATION_SIZE);
-    }
-
-    /** */
-    public static Map<Long, ProductOrder> generateOrdersMap(int count) {
-        Map<Long, ProductOrder> map = new HashMap<>();
-
-        for (long i = 0; i < count; i++) {
-            ProductOrder order = generateRandomOrder(i);
-            map.put(order.getId(), order);
-        }
-
-        return map;
-    }
-
-    /** */
-    public static Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> generateOrdersPerProductEntries(
-            Collection<CacheEntryImpl<Long, Product>> products) {
-        return generateOrdersPerProductEntries(products, TRANSACTION_ORDERS_COUNT);
-    }
-
-    /** */
-    public static Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> generateOrdersPerProductEntries(
-            Collection<CacheEntryImpl<Long, Product>> products, int ordersPerProductCount) {
-        Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> map = new HashMap<>();
-
-        for (CacheEntryImpl<Long, Product> entry : products) {
-            List<CacheEntryImpl<Long, ProductOrder>> orders = new LinkedList<>();
-
-            for (long i = 0; i < ordersPerProductCount; i++) {
-                ProductOrder order = generateRandomOrder(entry.getKey());
-                orders.add(new CacheEntryImpl<>(order.getId(), order));
-            }
-
-            map.put(entry.getKey(), orders);
-        }
-
-        return map;
-    }
-
-    /** */
-    public static Map<Long, Map<Long, ProductOrder>> generateOrdersPerProductMap(Map<Long, Product> products) {
-        return generateOrdersPerProductMap(products, TRANSACTION_ORDERS_COUNT);
-    }
-
-    /** */
-    public static Map<Long, Map<Long, ProductOrder>> generateOrdersPerProductMap(Map<Long, Product> products,
-                                                                                 int ordersPerProductCount) {
-        Map<Long, Map<Long, ProductOrder>> map = new HashMap<>();
-
-        for (Map.Entry<Long, Product> entry : products.entrySet()) {
-            Map<Long, ProductOrder> orders = new HashMap<>();
-
-            for (long i = 0; i < ordersPerProductCount; i++) {
-                ProductOrder order = generateRandomOrder(entry.getKey());
-                orders.put(order.getId(), order);
-            }
-
-            map.put(entry.getKey(), orders);
-        }
-
-        return map;
-    }
-
-    /** */
-    public static Collection<Long> getOrderIds(Map<Long, List<CacheEntryImpl<Long, ProductOrder>>> orders) {
-        Set<Long> ids = new HashSet<>();
-
-        for (Long key : orders.keySet()) {
-            for (CacheEntryImpl<Long, ProductOrder> entry : orders.get(key))
-                ids.add(entry.getKey());
-        }
-
-        return ids;
-    }
-
-    /** */
-    public static SimplePerson generateRandomSimplePerson(long personNum) {
-        int phonesCnt = RANDOM.nextInt(4);
-
-        List<String> phones = new LinkedList<>();
-
-        for (int i = 0; i < phonesCnt; i++)
-            phones.add(randomNumber(4));
-
-        return new SimplePerson(personNum, randomString(4), randomString(4), (short)RANDOM.nextInt(100),
-                RANDOM.nextBoolean(), RANDOM.nextLong(), RANDOM.nextFloat(), new Date(), phones);
-    }
-
-    /** */
-    public static SimplePersonId generateRandomSimplePersonId() {
-        return new SimplePersonId(randomString(4), randomString(4), RANDOM.nextInt(100));
-    }
-
-    /** */
-    public static Person generateRandomPerson(long personNum) {
-        int phonesCnt = RANDOM.nextInt(4);
-
-        List<String> phones = new LinkedList<>();
-
-        for (int i = 0; i < phonesCnt; i++)
-            phones.add(randomNumber(4));
-
-        return new Person(personNum, randomString(4), randomString(4), (short)RANDOM.nextInt(100),
-            RANDOM.nextBoolean(), RANDOM.nextLong(), RANDOM.nextFloat(), new Date(), phones);
-    }
-
-    /** */
-    public static PersonId generateRandomPersonId() {
-        return new PersonId(randomString(4), randomString(4), RANDOM.nextInt(100));
-    }
-
-    /** */
-    public static Product generateRandomProduct(long id) {
-        return new Product(id, randomString(2), randomString(6), randomString(20), generateProductPrice(id));
-    }
-
-    /** */
-    public static ProductOrder generateRandomOrder(long productId) {
-        return generateRandomOrder(productId, RANDOM.nextInt(10000));
-    }
-
-    /** */
-    private static ProductOrder generateRandomOrder(long productId, int saltedNumber) {
-        LocalDate date = LocalDate.of(ORDERS_YEAR, ORDERS_MONTH, ORDERS_DAY);
-
-        long id = Long.parseLong(productId + System.currentTimeMillis() + HOST_PREFIX + saltedNumber);
-
-        return generateRandomOrder(id, productId, date.atStartOfDay().toInstant(ZoneOffset.UTC));
-    }
-
-    /** */
-    public static ProductOrder generateRandomOrder(long id, long productId, Instant date) {
-        return new ProductOrder(id, productId, generateProductPrice(productId), date, 1 + RANDOM.nextInt(20));
-    }
-
-    /** */
-    public static boolean checkMapsEqual(Map map1, Map map2) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (Object key : map1.keySet()) {
-            Object obj1 = map1.get(key);
-            Object obj2 = map2.get(key);
-
-            if (obj1 == null || obj2 == null || !obj1.equals(obj2))
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K, V> boolean checkCollectionsEqual(Map<K, V> map, Collection<CacheEntryImpl<K, V>> col) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, V> entry : col) {
-            if (!entry.getValue().equals(map.get(entry.getKey())))
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkSimplePersonMapsEqual(Map<K, SimplePerson> map1, Map<K, SimplePerson> map2,
-                                                   boolean primitiveFieldsOnly) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (K key : map1.keySet()) {
-            SimplePerson person1 = map1.get(key);
-            SimplePerson person2 = map2.get(key);
-
-            boolean equals = person1 != null && person2 != null &&
-                    (primitiveFieldsOnly ? person1.equalsPrimitiveFields(person2) : person1.equals(person2));
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkPersonMapsEqual(Map<K, Person> map1, Map<K, Person> map2,
-        boolean primitiveFieldsOnly) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (K key : map1.keySet()) {
-            Person person1 = map1.get(key);
-            Person person2 = map2.get(key);
-
-            boolean equals = person1 != null && person2 != null &&
-                (primitiveFieldsOnly ? person1.equalsPrimitiveFields(person2) : person1.equals(person2));
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkSimplePersonCollectionsEqual(Map<K, SimplePerson> map, Collection<CacheEntryImpl<K, SimplePerson>> col,
-                                                          boolean primitiveFieldsOnly) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, SimplePerson> entry : col) {
-            boolean equals = primitiveFieldsOnly ?
-                    entry.getValue().equalsPrimitiveFields(map.get(entry.getKey())) :
-                    entry.getValue().equals(map.get(entry.getKey()));
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkPersonCollectionsEqual(Map<K, Person> map, Collection<CacheEntryImpl<K, Person>> col,
-        boolean primitiveFieldsOnly) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, Person> entry : col) {
-            boolean equals = primitiveFieldsOnly ?
-                entry.getValue().equalsPrimitiveFields(map.get(entry.getKey())) :
-                entry.getValue().equals(map.get(entry.getKey()));
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkProductCollectionsEqual(Map<K, Product> map, Collection<CacheEntryImpl<K, Product>> col) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, Product> entry : col)
-            if (!entry.getValue().equals(map.get(entry.getKey())))
-                return false;
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkProductMapsEqual(Map<K, Product> map1, Map<K, Product> map2) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (K key : map1.keySet()) {
-            Product product1 = map1.get(key);
-            Product product2 = map2.get(key);
-
-            boolean equals = product1 != null && product2 != null && product1.equals(product2);
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkOrderCollectionsEqual(Map<K, ProductOrder> map, Collection<CacheEntryImpl<K, ProductOrder>> col) {
-        if (map == null || col == null || map.size() != col.size())
-            return false;
-
-        for (CacheEntryImpl<K, ProductOrder> entry : col)
-            if (!entry.getValue().equals(map.get(entry.getKey())))
-                return false;
-
-        return true;
-    }
-
-    /** */
-    public static <K> boolean checkOrderMapsEqual(Map<K, ProductOrder> map1, Map<K, ProductOrder> map2) {
-        if (map1 == null || map2 == null || map1.size() != map2.size())
-            return false;
-
-        for (K key : map1.keySet()) {
-            ProductOrder order1 = map1.get(key);
-            ProductOrder order2 = map2.get(key);
-
-            boolean equals = order1 != null && order2 != null && order1.equals(order2);
-
-            if (!equals)
-                return false;
-        }
-
-        return true;
-    }
-
-    /** */
-    public static String randomString(int len) {
-        StringBuilder builder = new StringBuilder(len);
-
-        for (int i = 0; i < len; i++)
-            builder.append(LETTERS_ALPHABET.charAt(RANDOM.nextInt(LETTERS_ALPHABET.length())));
-
-        return builder.toString();
-    }
-
-    /** */
-    public static String randomNumber(int len) {
-        StringBuilder builder = new StringBuilder(len);
-
-        for (int i = 0; i < len; i++)
-            builder.append(NUMBERS_ALPHABET.charAt(RANDOM.nextInt(NUMBERS_ALPHABET.length())));
-
-        return builder.toString();
-    }
-
-    /** */
-    private static float generateProductPrice(long productId) {
-        long id = productId < 1000 ?
-                (((productId + 1) * (productId + 1) * 1000) / 2) * 10 :
-                (productId / 20) * (productId / 20);
-
-        id = id == 0 ? 24 : id;
-
-        float price = Long.parseLong(Long.toString(id).replace("0", ""));
-
-        int i = 0;
-
-        while (price > 100) {
-            if (i % 2 != 0)
-                price = price / 2;
-            else
-                price = (float)Math.sqrt(price);
-
-            i++;
-        }
-
-        return ((float)((int)(price * 100))) / 100.0F;
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/package-info.java
deleted file mode 100644
index 2c8fec165a6bf..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains utility classes for unit tests
- */
-
-package org.apache.ignite.tests.utils;
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/IgniteCassandraStoreTestSuite.java b/modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/IgniteCassandraStoreTestSuite.java
deleted file mode 100644
index deeb1291b892f..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/IgniteCassandraStoreTestSuite.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.testsuites.cassandra.store;
-
-import org.apache.ignite.tests.CassandraConfigTest;
-import org.apache.ignite.tests.CassandraDirectPersistenceTest;
-import org.apache.ignite.tests.CassandraSessionImplTest;
-import org.apache.ignite.tests.DDLGeneratorTest;
-import org.apache.ignite.tests.DatasourceSerializationTest;
-import org.apache.ignite.tests.IgnitePersistentStorePrimitiveTest;
-import org.apache.ignite.tests.IgnitePersistentStoreTest;
-import org.apache.ignite.tests.utils.CassandraHelper;
-import org.apache.ignite.tools.junit.JUnitTeamcityReporter;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-import org.junit.runners.Suite.SuiteClasses;
-
-/**
- * Cache suite for Cassandra store.
- *
- * Running with -DforkMode=always is recommended
- */
-@RunWith(Suite.class)
-@SuiteClasses({
-    CassandraConfigTest.class,
-    CassandraDirectPersistenceTest.class,
-    CassandraSessionImplTest.class,
-    DatasourceSerializationTest.class,
-    DDLGeneratorTest.class,
-    IgnitePersistentStoreTest.class,
-    IgnitePersistentStorePrimitiveTest.class})
-public class IgniteCassandraStoreTestSuite {
-    /** */
-    private static final Logger LOGGER = LogManager.getLogger(IgniteCassandraStoreTestSuite.class.getName());
-
-    /** */
-    @BeforeClass
-    public static void setUpClass() {
-        JUnitTeamcityReporter.suite = IgniteCassandraStoreTestSuite.class.getName();
-
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.startEmbeddedCassandra(LOGGER);
-            }
-            catch (Throwable e) {
-                throw new RuntimeException("Failed to start embedded Cassandra instance", e);
-            }
-        }
-    }
-
-    /** */
-    @AfterClass
-    public static void tearDownClass() {
-        if (CassandraHelper.useEmbeddedCassandra()) {
-            try {
-                CassandraHelper.stopEmbeddedCassandra();
-            }
-            catch (Throwable e) {
-                LOGGER.error("Failed to stop embedded Cassandra instance", e);
-            }
-        }
-    }
-}
diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/package-info.java b/modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/package-info.java
deleted file mode 100644
index b7447b0b4659c..0000000000000
--- a/modules/cassandra/store/src/test/java/org/apache/ignite/testsuites/cassandra/store/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Contains test suite for unit tests
- */
-package org.apache.ignite.testsuites.cassandra.store;
diff --git a/modules/cassandra/store/src/test/resources/log4j2.properties b/modules/cassandra/store/src/test/resources/log4j2.properties
deleted file mode 100644
index 9e79b91ffc1c0..0000000000000
--- a/modules/cassandra/store/src/test/resources/log4j2.properties
+++ /dev/null
@@ -1,178 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-rootLogger.level=info
-rootLogger.appenderRef.$1.ref=stdout
-
-# Direct log messages to stdout
-appender.stdout.name=stdout
-appender.stdout.type=Console
-appender.stdout.target=SYSTEM_OUT
-appender.stdout.layout.type=PatternLayout
-appender.stdout.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-# ------ LOAD TESTS LOGGING ------
-
-# Cassandra load tests loggers
-logger.CassandraLoadTests.name=CassandraLoadTests
-logger.CassandraLoadTests.level=INFO
-logger.CassandraLoadTests.appenderRef.$1.ref=stdout
-logger.CassandraLoadTests.appenderRef.$2.ref=cassandraLoadTests
-
-logger.CassandraWriteLoadTest.name=CassandraWriteLoadTest
-logger.CassandraWriteLoadTest.level=INFO
-logger.CassandraWriteLoadTest.appenderRef.$1.ref=csWrite
-
-logger.CassandraBulkWriteLoadTest.name=CassandraBulkWriteLoadTest
-logger.CassandraBulkWriteLoadTest.level=INFO
-logger.CassandraBulkWriteLoadTest.appenderRef.$1.ref=csBulkWrite
-
-logger.CassandraReadLoadTest.name=CassandraReadLoadTest
-logger.CassandraReadLoadTest.level=INFO
-logger.CassandraReadLoadTest.appenderRef.$1.ref=csRead
-
-logger.CassandraBulkReadLoadTest.name=CassandraBulkReadLoadTest
-logger.CassandraBulkReadLoadTest.level=INFO
-logger.CassandraBulkReadLoadTest.appenderRef.$1.ref=csBulkRead
-
-appender.csWrite.type=RollingFile
-appender.csWrite.name=csWrite
-appender.csWrite.fileName=logs/cassandra-write.log
-appender.csWrite.filePattern=logs/cassandra-write.log%i
-appender.csWrite.policies.type=SizeBasedTriggeringPolicy
-appender.csWrite.policies.size=10MB
-appender.csWrite.strategy.type=DefaultRolloverStrategy
-appender.csWrite.strategy.max=10
-appender.csWrite.append=true
-appender.csWrite.layout.type=PatternLayout
-appender.csWrite.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.csBulkWrite.type=RollingFile
-appender.csBulkWrite.name=csBulkWrite
-appender.csBulkWrite.fileName=logs/cassandra-bulk-write.log
-appender.csBulkWrite.filePattern=logs/cassandra-bulk-write.log%i
-appender.csBulkWrite.policies.type=SizeBasedTriggeringPolicy
-appender.csBulkWrite.policies.size=10MB
-appender.csBulkWrite.strategy.type=DefaultRolloverStrategy
-appender.csBulkWrite.strategy.max=10
-appender.csBulkWrite.append=true
-appender.csBulkWrite.layout.type=PatternLayout
-appender.csBulkWrite.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.csRead.type=RollingFile
-appender.csRead.name=csRead
-appender.csRead.fileName=logs/cassandra-read.log
-appender.csRead.filePattern=logs/cassandra-read.log%i
-appender.csRead.policies.type=SizeBasedTriggeringPolicy
-appender.csRead.policies.size=10MB
-appender.csRead.strategy.type=DefaultRolloverStrategy
-appender.csRead.strategy.max=10
-appender.csRead.append=true
-appender.csRead.layout.type=PatternLayout
-appender.csRead.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.csBulkRead.type=RollingFile
-appender.csBulkRead.name=csBulkRead
-appender.csBulkRead.fileName=logs/cassandra-bulk-read.log
-appender.csBulkRead.filePattern=logs/cassandra-bulk-read.log%i
-appender.csBulkRead.policies.type=SizeBasedTriggeringPolicy
-appender.csBulkRead.policies.size=10MB
-appender.csBulkRead.strategy.type=DefaultRolloverStrategy
-appender.csBulkRead.strategy.max=10
-appender.csBulkRead.append=true
-appender.csBulkRead.layout.type=PatternLayout
-appender.csBulkRead.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-# Ignite load tests loggers
-logger.IgniteLoadTests.name=IgniteLoadTests
-logger.IgniteLoadTests.level=INFO
-logger.IgniteLoadTests.appenderRef.$1.ref=stdout
-logger.IgniteLoadTests.appenderRef.$2.ref=igniteLoadTests
-
-logger.IgniteWriteLoadTest.name=IgniteWriteLoadTest
-logger.IgniteWriteLoadTest.level=INFO
-logger.IgniteWriteLoadTest.appenderRef.$1.ref=igWrite
-
-logger.IgniteBulkWriteLoadTest.name=IgniteBulkWriteLoadTest
-logger.IgniteBulkWriteLoadTest.level=INFO
-logger.IgniteBulkWriteLoadTest.appenderRef.$1.ref=igBulkWrite
-
-logger.IgniteReadLoadTest.name=IgniteReadLoadTest
-logger.IgniteReadLoadTest.level=INFO
-logger.IgniteReadLoadTest.appenderRef.$1.ref=igRead
-
-logger.IgniteBulkReadLoadTest.name=IgniteBulkReadLoadTest
-logger.IgniteBulkReadLoadTest.level=INFO
-logger.IgniteBulkReadLoadTest.appenderRef.$1.ref=igBulkRead
-
-appender.igniteLoadTests.type=RollingFile
-appender.igniteLoadTests.name=igniteLoadTests
-appender.igniteLoadTests.fileName=logs/ignite-load-tests.log
-appender.igniteLoadTests.filePattern=logs/ignite-load-tests.log%i
-appender.igniteLoadTests.policies.type=SizeBasedTriggeringPolicy
-appender.igniteLoadTests.policies.size=10MB
-appender.igniteLoadTests.strategy.type=DefaultRolloverStrategy
-appender.igniteLoadTests.strategy.max=10
-appender.igniteLoadTests.append=true
-appender.igniteLoadTests.layout.type=PatternLayout
-appender.igniteLoadTests.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.igWrite.type=RollingFile
-appender.igWrite.name=igWrite
-appender.igWrite.fileName=logs/ignite-write.log
-appender.igWrite.filePattern=logs/ignite-write.log%i
-appender.igWrite.policies.type=SizeBasedTriggeringPolicy
-appender.igWrite.policies.size=10MB
-appender.igWrite.strategy.type=DefaultRolloverStrategy
-appender.igWrite.strategy.max=10
-appender.igWrite.append=true
-appender.igWrite.layout.type=PatternLayout
-appender.igWrite.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.igBulkWrite.type=RollingFile
-appender.igBulkWrite.name=igBulkWrite
-appender.igBulkWrite.fileName=logs/ignite-bulk-write.log
-appender.igBulkWrite.filePattern=logs/ignite-bulk-write.log%i
-appender.igBulkWrite.policies.type=SizeBasedTriggeringPolicy
-appender.igBulkWrite.policies.size=10MB
-appender.igBulkWrite.strategy.type=DefaultRolloverStrategy
-appender.igBulkWrite.strategy.max=10
-appender.igBulkWrite.append=true
-appender.igBulkWrite.layout.type=PatternLayout
-appender.igBulkWrite.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.igRead.type=RollingFile
-appender.igRead.name=igRead
-appender.igRead.fileName=logs/ignite-read.log
-appender.igRead.filePattern=logs/ignite-read.log%i
-appender.igRead.policies.type=SizeBasedTriggeringPolicy
-appender.igRead.policies.size=10MB
-appender.igRead.strategy.type=DefaultRolloverStrategy
-appender.igRead.strategy.max=10
-appender.igRead.append=true
-appender.igRead.layout.type=PatternLayout
-appender.igRead.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
-
-appender.igBulkRead.type=RollingFile
-appender.igBulkRead.name=igBulkRead
-appender.igBulkRead.fileName=logs/ignite-bulk-read.log
-appender.igBulkRead.filePattern=logs/ignite-bulk-read.log%i
-appender.igBulkRead.policies.type=SizeBasedTriggeringPolicy
-appender.igBulkRead.policies.size=10MB
-appender.igBulkRead.strategy.type=DefaultRolloverStrategy
-appender.igBulkRead.strategy.max=10
-appender.igBulkRead.append=true
-appender.igBulkRead.layout.type=PatternLayout
-appender.igBulkRead.layout.pattern=%d{HH:mm:ss,SSS} %5p [%t] - %m%n
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
deleted file mode 100644
index aec602ecb365e..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection-settings.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd">
-
-    <bean id="cassandraAdminCredentials" class="org.apache.ignite.tests.utils.CassandraAdminCredentials"/>
-    <bean id="cassandraRegularCredentials" class="org.apache.ignite.tests.utils.CassandraRegularCredentials"/>
-
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
-        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
-            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
-        </constructor-arg>
-    </bean>
-
-    <bean id="contactPoints" class="org.apache.ignite.tests.utils.CassandraHelper" factory-method="getContactPointsArray"/>
-
-    <bean id="cassandraAdminDataSource" class="org.apache.ignite.cache.store.cassandra.datasource.DataSource">
-        <property name="credentials" ref="cassandraAdminCredentials"/>
-        <property name="contactPoints" ref="contactPoints"/>
-        <property name="readConsistency" value="ONE"/>
-        <property name="writeConsistency" value="ONE"/>
-        <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
-    </bean>
-
-    <bean id="cassandraRegularDataSource" class="org.apache.ignite.cache.store.cassandra.datasource.DataSource">
-        <property name="credentials" ref="cassandraRegularCredentials"/>
-        <property name="contactPoints" ref="contactPoints"/>
-        <property name="readConsistency" value="ONE"/>
-        <property name="writeConsistency" value="ONE"/>
-        <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
-    </bean>
-</beans>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties
deleted file mode 100644
index ef150189a8992..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/connection.properties
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Comma delimited Cassandra contact points in format: host[:port]
-contact.points=127.0.0.1
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties
deleted file mode 100644
index f011bcc3b338c..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/credentials.properties
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Cassandra admin user/password
-admin.user=
-admin.password=
-
-# Cassandra regular user/password
-regular.user=
-regular.password=
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml
deleted file mode 100644
index 5089f074a41da..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/embedded-cassandra.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cluster_name: 'Test Cluster'
-
-listen_address: 127.0.0.1
-native_transport_port: 9042
-rpc_address: 127.0.0.1
-rpc_port: 9160
-
-seed_provider:
-    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
-      parameters:
-          - seeds: "127.0.0.1"
-saved_caches_directory: ./data/saved_caches
-commitlog_directory: ./data/commitlog
-hints_directory: ./data/hints
-data_file_directories:
-    - ./data/data
-cdc_raw_directory: ./data/cdc
-
-num_tokens: 256
-hinted_handoff_enabled: true
-max_hint_window_in_ms: 10800000 # 3 hours
-hinted_handoff_throttle_in_kb: 1024
-max_hints_delivery_threads: 2
-hints_flush_period_in_ms: 10000
-max_hints_file_size_in_mb: 128
-batchlog_replay_throttle_in_kb: 1024
-authenticator: AllowAllAuthenticator
-authorizer: AllowAllAuthorizer
-role_manager: CassandraRoleManager
-roles_validity_in_ms: 2000
-permissions_validity_in_ms: 2000
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-disk_failure_policy: stop
-commit_failure_policy: stop
-key_cache_size_in_mb:
-key_cache_save_period: 14400
-row_cache_size_in_mb: 0
-row_cache_save_period: 0
-counter_cache_size_in_mb:
-counter_cache_save_period: 7200
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-commitlog_segment_size_in_mb: 32
-concurrent_reads: 32
-concurrent_writes: 32
-concurrent_counter_writes: 32
-concurrent_materialized_view_writes: 32
-memtable_allocation_type: heap_buffers
-index_summary_capacity_in_mb:
-index_summary_resize_interval_in_minutes: 60
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-storage_port: 7000
-ssl_storage_port: 7001
-start_native_transport: true
-start_rpc: false
-rpc_keepalive: true
-rpc_server_type: sync
-thrift_framed_transport_size_in_mb: 15
-incremental_backups: false
-snapshot_before_compaction: false
-auto_snapshot: true
-tombstone_warn_threshold: 1000
-tombstone_failure_threshold: 100000
-column_index_size_in_kb: 64
-batch_size_warn_threshold_in_kb: 5
-batch_size_fail_threshold_in_kb: 50
-compaction_throughput_mb_per_sec: 16
-compaction_large_partition_warning_threshold_mb: 100
-sstable_preemptive_open_interval_in_mb: 50
-read_request_timeout_in_ms: 5000
-range_request_timeout_in_ms: 10000
-write_request_timeout_in_ms: 2000
-counter_write_request_timeout_in_ms: 5000
-cas_contention_timeout_in_ms: 1000
-truncate_request_timeout_in_ms: 60000
-request_timeout_in_ms: 10000
-cross_node_timeout: false
-endpoint_snitch: SimpleSnitch
-dynamic_snitch_update_interval_in_ms: 100
-dynamic_snitch_reset_interval_in_ms: 600000
-dynamic_snitch_badness_threshold: 0.1
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-server_encryption_options:
-    internode_encryption: none
-    keystore: conf/.keystore
-    keystore_password: cassandra
-    truststore: conf/.truststore
-    truststore_password: cassandra
-
-client_encryption_options:
-    enabled: false
-    optional: false
-    keystore: conf/.keystore
-    keystore_password: cassandra
-
-internode_compression: all
-inter_dc_tcp_nodelay: false
-tracetype_query_ttl: 86400
-tracetype_repair_ttl: 604800
-gc_warn_threshold_in_ms: 1000
-enable_user_defined_functions: false
-enable_scripted_user_defined_functions: false
-windows_timer_interval: 1
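
The YAML above configures the single-node embedded Cassandra instance the tests run against (native transport on 127.0.0.1:9042, local data directories, no authentication). As a hedged illustration only, one common way to bring up such an in-process node against this file is shown below; the Ignite test helpers may bootstrap it differently, and the system properties and CassandraDaemon.activate() call are taken from the Cassandra server API, not from these files.

    import java.io.File;

    import org.apache.cassandra.service.CassandraDaemon;

    public class EmbeddedCassandraSketch {
        public static void main(String[] args) throws Exception {
            // Point Cassandra at the YAML above; the "cassandra.config" property expects a URL.
            File cfg = new File("modules/cassandra/store/src/test/resources/" +
                "org/apache/ignite/tests/cassandra/embedded-cassandra.yaml");

            System.setProperty("cassandra.config", cfg.toURI().toURL().toString());
            System.setProperty("cassandra-foreground", "true");

            // Boots an in-process Cassandra node listening on 127.0.0.1:9042 as configured above.
            new CassandraDaemon().activate();
        }
    }
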
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties
deleted file mode 100644
index 9205cc1ed1e46..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/cassandra/keyspaces.properties
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Cassandra keyspaces used for tests
-keyspaces=test1
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
deleted file mode 100644
index cde4becdb3e6a..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/ignite-config.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd">
-
-    <!-- Cassandra connection settings -->
-    <import resource="classpath:org/apache/ignite/tests/cassandra/connection-settings.xml" />
-
-    <!-- Persistence settings for 'cache1' -->
-    <bean id="cache1_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache2' -->
-    <bean id="cache2_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml" />
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache2" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache2_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-            </list>
-        </property>
-
-        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <!--
-                        Ignite provides several options for automatic discovery that can be used
-                        instead of static IP-based discovery. For information on all options refer
-                        to our documentation: http://apacheignite.readme.io/docs/cluster-config
-                    -->
-                    <!-- Static IP finder below enables static-based discovery of initial nodes; use the commented multicast IP finder for multicast discovery instead. -->
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                    <!--<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">-->
-                        <property name="addresses">
-                            <list>
-                                <!-- In distributed environment, replace with actual host IP address. -->
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
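
The configuration above wires two caches to Cassandra through CassandraCacheStoreFactory, referencing the data source and persistence-settings beans by name. The following is a minimal Java sketch of an equivalent programmatic setup; it assumes the factory also exposes direct setters (setDataSource, setPersistenceSettings) and that the module's DataSource offers setContactPoints — neither is shown in these files, so treat the setter names as assumptions.

    import org.apache.ignite.Ignite;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory;
    import org.apache.ignite.cache.store.cassandra.datasource.DataSource;
    import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;
    import org.apache.ignite.configuration.CacheConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.springframework.core.io.ClassPathResource;

    public class BlobCacheConfigSketch {
        public static void main(String[] args) {
            // Connection settings (the XML above imports them from connection-settings.xml instead).
            DataSource dataSrc = new DataSource();
            dataSrc.setContactPoints("127.0.0.1");

            // Same persistence descriptor the 'cache1_persistence_settings' bean loads above.
            KeyValuePersistenceSettings persistenceSettings = new KeyValuePersistenceSettings(
                new ClassPathResource("org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml"));

            CassandraCacheStoreFactory<Long, Object> storeFactory = new CassandraCacheStoreFactory<>();
            storeFactory.setDataSource(dataSrc);
            storeFactory.setPersistenceSettings(persistenceSettings);

            CacheConfiguration<Long, Object> cacheCfg = new CacheConfiguration<>("cache1");
            cacheCfg.setReadThrough(true);
            cacheCfg.setWriteThrough(true);
            cacheCfg.setCacheStoreFactory(storeFactory);

            IgniteConfiguration cfg = new IgniteConfiguration();
            cfg.setCacheConfiguration(cacheCfg);

            try (Ignite ignite = Ignition.start(cfg)) {
                ignite.cache("cache1").put(1L, "value-1"); // written through to the blob_test1 table
            }
        }
    }
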
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml
deleted file mode 100644
index 1c1951d4e0557..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-1.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="blob_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" />
-    <valuePersistence strategy="BLOB"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml
deleted file mode 100644
index 49b3caf7e0103..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-2.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="blob_test2">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE"/>
-    <valuePersistence strategy="BLOB"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
deleted file mode 100644
index e872201fcc180..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/blob/persistence-settings-3.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="blob_test3">
-    <!-- By default Java standard serialization is going to be used -->
-    <keyPersistence class="java.lang.Long"
-                    strategy="BLOB"
-                    column="key"/>
-
-    <!-- Java serializer explicitly specified to be used -->
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person"
-                      strategy="BLOB"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer"
-                      column="value"/>
-</persistence>
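
The serializer attribute above plugs an org.apache.ignite.cache.store.cassandra.serializer.Serializer implementation into the BLOB strategy; JavaSerializer and KryoSerializer ship with the module, and a custom class can be referenced the same way. Below is a sketch of such a custom implementation. The interface contract assumed here (ByteBuffer serialize(Object), Object deserialize(ByteBuffer)) mirrors the shipped serializers and is not spelled out in these files.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.nio.ByteBuffer;

    import org.apache.ignite.cache.store.cassandra.serializer.Serializer;

    /** Minimal custom serializer sketch based on plain Java serialization;
     *  referenced from the serializer="..." attribute of a BLOB-strategy element. */
    public class SimpleBlobSerializer implements Serializer {
        @Override public ByteBuffer serialize(Object obj) {
            try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
                 ObjectOutputStream oos = new ObjectOutputStream(bos)) {
                oos.writeObject(obj);
                oos.flush();

                return ByteBuffer.wrap(bos.toByteArray());
            }
            catch (Exception e) {
                throw new RuntimeException("Failed to serialize object", e);
            }
        }

        @Override public Object deserialize(ByteBuffer buf) {
            byte[] bytes = new byte[buf.remaining()];
            buf.get(bytes);

            try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                return ois.readObject();
            }
            catch (Exception e) {
                throw new RuntimeException("Failed to deserialize object", e);
            }
        }
    }
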
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml
deleted file mode 100644
index 115e263e017d8..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml
+++ /dev/null
@@ -1,90 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one or more
-  ~ contributor license agreements.  See the NOTICE file distributed with
-  ~ this work for additional information regarding copyright ownership.
-  ~ The ASF licenses this file to You under the Apache License, Version 2.0
-  ~ (the "License"); you may not use this file except in compliance with
-  ~ the License.  You may obtain a copy of the License at
-  ~
-  ~      http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd">
-
-    <!-- Cassandra connection settings -->
-    <import resource="classpath:org/apache/ignite/tests/cassandra/connection-settings.xml"/>
-
-    <!-- Persistence settings for 'cache2' -->
-    <bean id="cache2_persistence_settings"
-          class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/loadall_blob/persistence-settings.xml"/>
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-
-        <property name="marshaller">
-            <bean class="org.apache.ignite.internal.binary.BinaryMarshaller"/>
-        </property>
-
-        <property name="binaryConfiguration">
-            <bean class="org.apache.ignite.configuration.BinaryConfiguration">
-                <property name="compactFooter" value="false"/>
-            </bean>
-        </property>
-
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache2" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="storeKeepBinary" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache2_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-            </list>
-        </property>
-
-        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <!--
-                        Ignite provides several options for automatic discovery that can be used
-                        instead of static IP-based discovery. For information on all options refer
-                        to our documentation: http://apacheignite.readme.io/docs/cluster-config
-                    -->
-                    <!-- Static IP finder below enables static-based discovery of initial nodes; use the commented multicast IP finder for multicast discovery instead. -->
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                        <!--<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">-->
-                        <property name="addresses">
-                            <list>
-                                <!-- In distributed environment, replace with actual host IP address. -->
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
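
This configuration keeps loaded values in binary form (storeKeepBinary=true, BinaryMarshaller with compactFooter=false), presumably so the load-all tests can pull BLOB rows back without deserializing POJOs on the node. A hedged sketch of driving a bulk load through this store follows; it only uses core Ignite API (Ignition.start with a classpath config, withKeepBinary, loadCache) and assumes a Cassandra instance matching connection-settings.xml is reachable.

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteCache;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.binary.BinaryObject;

    public class LoadAllBlobSketch {
        public static void main(String[] args) {
            // Start a node from the Spring config above (resolved on the classpath).
            try (Ignite ignite = Ignition.start(
                    "org/apache/ignite/tests/persistence/loadall_blob/ignite-config.xml")) {

                // storeKeepBinary=true means loaded values stay binary, so use keep-binary mode.
                IgniteCache<Long, BinaryObject> cache = ignite.<Long, Object>cache("cache2").withKeepBinary();

                // Pull everything from the Cassandra table through the store (no filter, no args).
                cache.loadCache(null);

                System.out.println("Loaded entries: " + cache.size());
            }
        }
    }
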
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/persistence-settings.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/persistence-settings.xml
deleted file mode 100644
index e872201fcc180..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/loadall_blob/persistence-settings.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="blob_test3">
-    <!-- By default Java standard serialization is going to be used -->
-    <keyPersistence class="java.lang.Long"
-                    strategy="BLOB"
-                    column="key"/>
-
-    <!-- Java serializer explicitly specified to be used -->
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person"
-                      strategy="BLOB"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer"
-                      column="value"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
deleted file mode 100644
index 4105b3dfbea4f..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/ignite-config.xml
+++ /dev/null
@@ -1,212 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd">
-
-    <!-- Cassandra connection settings -->
-    <import resource="classpath:org/apache/ignite/tests/cassandra/connection-settings.xml" />
-
-    <!-- Persistence settings for 'cache1' -->
-    <bean id="cache1_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache2' -->
-    <bean id="cache2_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache3' -->
-    <bean id="cache3_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache4' -->
-    <bean id="cache4_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache5' -->
-    <bean id="cache5_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache6' -->
-    <bean id="cache6_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'product' -->
-    <bean id="product_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/pojo/product.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'order' -->
-    <bean id="order_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource" value="classpath:org/apache/ignite/tests/persistence/pojo/order.xml" />
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache2" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache2_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache3" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache3"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache3_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache4" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache4"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache4_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache5" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache5"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache5_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache6" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache6"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache6_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "product" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="product"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="atomicityMode" value="TRANSACTIONAL"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="product_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "order" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="order"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="atomicityMode" value="TRANSACTIONAL"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="order_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-            </list>
-        </property>
-
-        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <!--
-                        Ignite provides several options for automatic discovery that can be used
-                        instead of static IP-based discovery. For information on all options refer
-                        to our documentation: http://apacheignite.readme.io/docs/cluster-config
-                    -->
-                    <!-- Static IP finder below enables static-based discovery of initial nodes; use the commented multicast IP finder for multicast discovery instead. -->
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                    <!--<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">-->
-                        <property name="addresses">
-                            <list>
-                                <!-- In distributed environment, replace with actual host IP address. -->
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
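
The "product" and "order" caches above are TRANSACTIONAL with read/write-through, so store updates are applied when a transaction commits. A hedged sketch of a transaction over the "order" cache follows; ProductOrder is the value class named in order.xml, and the example only reads and removes entries so it does not assume anything about that class beyond its name.

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteCache;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.tests.pojos.ProductOrder;
    import org.apache.ignite.transactions.Transaction;
    import org.apache.ignite.transactions.TransactionConcurrency;
    import org.apache.ignite.transactions.TransactionIsolation;

    public class TransactionalStoreSketch {
        public static void main(String[] args) {
            try (Ignite ignite = Ignition.start(
                    "org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {

                IgniteCache<Long, ProductOrder> orders = ignite.cache("order");

                long orderId = 1L;

                try (Transaction tx = ignite.transactions().txStart(
                        TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                    ProductOrder order = orders.get(orderId); // read-through from Cassandra if absent

                    if (order != null)
                        orders.remove(orderId);               // write-through delete applied on commit

                    tx.commit();
                }
            }
        }
    }
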
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml
deleted file mode 100644
index d6163643fae44..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/order.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="order">
-    <keyPersistence class="java.lang.Long" column="id" strategy="PRIMITIVE" />
-    <valuePersistence class="org.apache.ignite.tests.pojos.ProductOrder" strategy="POJO" />
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml
deleted file mode 100644
index b39578c0991ed..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="pojo_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE"/>
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person" strategy="POJO"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml
deleted file mode 100644
index 10a2d9f6244d3..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="pojo_test2">
-    <keyPersistence class="org.apache.ignite.tests.pojos.PersonId" strategy="POJO"/>
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person" strategy="POJO"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
deleted file mode 100644
index f602508cf1afb..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
+++ /dev/null
@@ -1,175 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-Root container for persistence settings configuration.
-
-Note: required element
-
-Attributes:
-  1) keyspace [required] - keyspace for Cassandra tables which should be used to store key/value pairs
-  2) table    [required] - Cassandra tables which should be used to store key/value pairs
-  3) ttl      [optional] - expiration period for the table rows (in seconds)
--->
-<persistence keyspace="test1" table="pojo_test3" ttl="86400">
-    <!--
-    Cassandra keyspace options which should be used to create provided keyspace if it doesn't exist.
-
-    Note: optional element
-    -->
-    <keyspaceOptions>
-        REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 3}
-        AND DURABLE_WRITES = true
-    </keyspaceOptions>
-
-    <!--
-    Cassandra table options which should be used to create provided table if it doesn't exist.
-
-    Note: optional element
-    -->
-    <tableOptions>
-        comment = 'A most excellent and useful table'
-        AND read_repair_chance = 0.2
-    </tableOptions>
-
-    <!--
-    Persistent settings for Ignite cache keys.
-
-    Note: required element
-
-    Attributes:
-      1) class      [required] - java class name for Ignite cache key
-      2) strategy   [required] - one of three possible persistent strategies which controls how object
-        should be persisted/loaded to/from Cassandra table:
-            a) PRIMITIVE - stores key value as is, by mapping it to Cassandra table column with corresponding type.
-                Should be used only for simple java types (int, long, String, double, Date) which could be mapped
-                to corresponding Cassandra types.
-            b) BLOB - stores key value as BLOB, by mapping it to Cassandra table column with blob type.
-                Could be used for any java object. Conversion of java object to BLOB is handled by "serializer"
-                which could be specified in serializer attribute (see below).
-            c) POJO - stores each field of an object as a column having corresponding type in Cassandra table.
-                Provides ability to utilize Cassandra secondary indexes for object fields.
-      3) serializer [optional] - specifies serializer class for BLOB strategy. Shouldn't be used for PRIMITIVE and
-        POJO strategies. Available implementations:
-            a) org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-                serialization framework
-            b) org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo
-                serialization framework
-      4) column     [optional] - specifies column name for PRIMITIVE and BLOB strategies where to store key value.
-        If not specified, a column named 'key' will be used. Shouldn't be used for POJO strategy.
-    -->
-    <keyPersistence class="org.apache.ignite.tests.pojos.PersonId" strategy="POJO">
-        <!--
-        Partition key fields if POJO strategy used.
-
-        Note: optional element, only required for POJO strategy in case you want to manually specify
-            POJO fields to Cassandra columns mapping, instead of relying on dynamic discovering of
-            POJO fields and mapping them to the same columns of Cassandra table.
-        -->
-        <partitionKey>
-            <!--
-             Mapping from POJO field to Cassandra table column.
-
-             Note: required element
-
-             Attributes:
-               1) name   [required] - POJO field name
-               2) column [optional] - Cassandra table column name. If not specified lowercase
-                  POJO field name will be used.
-            -->
-            <field name="companyCode" column="company" />
-            <field name="departmentCode" column="department" />
-        </partitionKey>
-
-        <!--
-        Cluster key fields if POJO strategy used.
-
-        Note: optional element, only required for POJO strategy in case you want to manually specify
-            POJO fields to Cassandra columns mapping, instead of relying on dynamic discovering of
-            POJO fields and mapping them to the same columns of Cassandra table.
-        -->
-        <clusterKey>
-            <!--
-             Mapping from POJO field to Cassandra table column.
-
-             Note: required element
-
-             Attributes:
-               1) name   [required] - POJO field name
-               2) column [optional] - Cassandra table column name. If not specified lowercase
-                  POJO field name will be used.
-               3) sort   [optional] - specifies sort order (**asc** or **desc**)
-            -->
-            <field name="personNumber" column="number" sort="desc"/>
-        </clusterKey>
-    </keyPersistence>
-
-    <!--
-    Persistent settings for Ignite cache values.
-
-    Note: required element
-
-    Attributes:
-      1) class      [required] - java class name for Ignite cache value
-      2) strategy   [required] - one of three possible persistent strategies which controls how object
-        should be persisted/loaded to/from Cassandra table:
-            a) PRIMITIVE - stores key value as is, by mapping it to Cassandra table column with corresponding type.
-                Should be used only for simple java types (int, long, String, double, Date) which could be mapped
-                to corresponding Cassandra types.
-            b) BLOB - stores key value as BLOB, by mapping it to Cassandra table column with blob type.
-                Could be used for any java object. Conversion of java object to BLOB is handled by "serializer"
-                which could be specified in serializer attribute (see below).
-            c) POJO - stores each field of an object as a column having corresponding type in Cassandra table.
-                Provides ability to utilize Cassandra secondary indexes for object fields.
-      3) serializer [optional] - specifies serializer class for BLOB strategy. Shouldn't be used for PRIMITIVE and
-        POJO strategies. Available implementations:
-            a) org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-                serialization framework
-            b) org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo
-                serialization framework
-      4) column     [optional] - specifies column name for PRIMITIVE and BLOB strategies where to store value.
-        If not specified, a column named 'value' will be used. Shouldn't be used for POJO strategy.
-    -->
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person"
-                      strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer">
-        <!--
-         Mapping from POJO field to Cassandra table column.
-
-         Note: required element
-
-         Attributes:
-           1) name         [required] - POJO field name
-           2) column       [optional] - Cassandra table column name. If not specified lowercase
-              POJO field name will be used.
-           3) static       [optional] - boolean flag which specifies that column is static within a given partition
-           4) index        [optional] - boolean flag specifying that secondary index should be created for the field
-           5) indexClass   [optional] - custom index java class name, in case you want to use custom index
-           6) indexOptions [optional] - custom index options
-        -->
-        <field name="personNumber" column="number" />
-        <field name="firstName" column="first_name" />
-        <field name="lastName" column="last_name" />
-        <field name="fullName" />
-        <field name="age" />
-        <field name="married" index="true"/>
-        <field name="height" />
-        <field name="weight" />
-        <field name="birthDate" column="birth_date" />
-        <field name="phones" />
-    </valuePersistence>
-</persistence>
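
The POJO mappings above expect the key class to expose companyCode, departmentCode and personNumber fields and the value class to expose the listed person fields. As an illustration of what a key class matching the partition/cluster key mapping could look like, here is a sketch; the field names come from the field elements above, while the types, constructors and accessor shapes are assumptions (the real org.apache.ignite.tests.pojos.PersonId may differ).

    import java.io.Serializable;

    /** Illustrative key POJO for the mapping above:
     *  companyCode -> company, departmentCode -> department, personNumber -> number (desc). */
    public class PersonIdSketch implements Serializable {
        private String companyCode;    // partition key column "company"
        private String departmentCode; // partition key column "department"
        private long personNumber;     // cluster key column "number", sorted desc

        public PersonIdSketch() {
            // No-arg constructor so the store can instantiate the key reflectively.
        }

        public PersonIdSketch(String companyCode, String departmentCode, long personNumber) {
            this.companyCode = companyCode;
            this.departmentCode = departmentCode;
            this.personNumber = personNumber;
        }

        public String getCompanyCode() { return companyCode; }
        public void setCompanyCode(String companyCode) { this.companyCode = companyCode; }

        public String getDepartmentCode() { return departmentCode; }
        public void setDepartmentCode(String departmentCode) { this.departmentCode = departmentCode; }

        public long getPersonNumber() { return personNumber; }
        public void setPersonNumber(long personNumber) { this.personNumber = personNumber; }
    }
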
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml
deleted file mode 100644
index 490d8e76dc8ef..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml
+++ /dev/null
@@ -1,175 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-Root container for persistence settings configuration.
-
-Note: required element
-
-Attributes:
-  1) keyspace [required] - keyspace for Cassandra tables which should be used to store key/value pairs
-  2) table    [required] - Cassandra tables which should be used to store key/value pairs
-  3) ttl      [optional] - expiration period for the table rows (in seconds)
--->
-<persistence keyspace="test1" ttl="86400">
-    <!--
-    Cassandra keyspace options which should be used to create provided keyspace if it doesn't exist.
-
-    Note: optional element
-    -->
-    <keyspaceOptions>
-        REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 3}
-        AND DURABLE_WRITES = true
-    </keyspaceOptions>
-
-    <!--
-    Cassandra table options which should be used to create provided table if it doesn't exist.
-
-    Note: optional element
-    -->
-    <tableOptions>
-        comment = 'A most excellent and useful table'
-        AND read_repair_chance = 0.2
-    </tableOptions>
-
-    <!--
-    Persistent settings for Ignite cache keys.
-
-    Note: required element
-
-    Attributes:
-      1) class      [required] - java class name for Ignite cache key
-      2) strategy   [required] - one of three possible persistent strategies which controls how object
-        should be persisted/loaded to/from Cassandra table:
-            a) PRIMITIVE - stores key value as is, by mapping it to Cassandra table column with corresponding type.
-                Should be used only for simple java types (int, long, String, double, Date) which could be mapped
-                to corresponding Cassandra types.
-            b) BLOB - stores key value as BLOB, by mapping it to Cassandra table column with blob type.
-                Could be used for any java object. Conversion of java object to BLOB is handled by "serializer"
-                which could be specified in serializer attribute (see below).
-            c) POJO - stores each field of an object as a column having corresponding type in Cassandra table.
-                Provides ability to utilize Cassandra secondary indexes for object fields.
-      3) serializer [optional] - specifies serializer class for BLOB strategy. Shouldn't be used for PRIMITIVE and
-        POJO strategies. Available implementations:
-            a) org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-                serialization framework
-            b) org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo
-                serialization framework
-      4) column     [optional] - specifies column name for PRIMITIVE and BLOB strategies where to store key value.
-        If not specified, a column named 'key' will be used. Shouldn't be used for POJO strategy.
-    -->
-    <keyPersistence class="org.apache.ignite.tests.pojos.PersonId" strategy="POJO">
-        <!--
-        Partition key fields if POJO strategy used.
-
-        Note: optional element, only required for POJO strategy in case you want to manually specify
-            POJO fields to Cassandra columns mapping, instead of relying on dynamic discovering of
-            POJO fields and mapping them to the same columns of Cassandra table.
-        -->
-        <partitionKey>
-            <!--
-             Mapping from POJO field to Cassandra table column.
-
-             Note: required element
-
-             Attributes:
-               1) name   [required] - POJO field name
-               2) column [optional] - Cassandra table column name. If not specified lowercase
-                  POJO field name will be used.
-            -->
-            <field name="companyCode" column="company" />
-            <field name="departmentCode" column="department" />
-        </partitionKey>
-
-        <!--
-        Cluster key fields if POJO strategy used.
-
-        Note: optional element, only required for POJO strategy in case you want to manually specify
-            POJO fields to Cassandra columns mapping, instead of relying on dynamic discovering of
-            POJO fields and mapping them to the same columns of Cassandra table.
-        -->
-        <clusterKey>
-            <!--
-             Mapping from POJO field to Cassandra table column.
-
-             Note: required element
-
-             Attributes:
-               1) name   [required] - POJO field name
-               2) column [optional] - Cassandra table column name. If not specified lowercase
-                  POJO field name will be used.
-               3) sort   [optional] - specifies sort order (**asc** or **desc**)
-            -->
-            <field name="personNumber" column="number" sort="desc"/>
-        </clusterKey>
-    </keyPersistence>
-
-    <!--
-    Persistent settings for Ignite cache values.
-
-    Note: required element
-
-    Attributes:
-      1) class      [required] - java class name for Ignite cache value
-      2) strategy   [required] - one of three possible persistent strategies which controls how object
-        should be persisted/loaded to/from Cassandra table:
-            a) PRIMITIVE - stores key value as is, by mapping it to Cassandra table column with corresponding type.
-                Should be used only for simple java types (int, long, String, double, Date) which could be mapped
-                to corresponding Cassandra types.
-            b) BLOB - stores key value as BLOB, by mapping it to Cassandra table column with blob type.
-                Could be used for any java object. Conversion of java object to BLOB is handled by "serializer"
-                which could be specified in serializer attribute (see below).
-            c) POJO - stores each field of an object as a column having corresponding type in Cassandra table.
-                Provides ability to utilize Cassandra secondary indexes for object fields.
-      3) serializer [optional] - specifies serializer class for BLOB strategy. Shouldn't be used for PRIMITIVE and
-        POJO strategies. Available implementations:
-            a) org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-                serialization framework
-            b) org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo
-                serialization framework
-      4) column     [optional] - specifies column name for PRIMITIVE and BLOB strategies where to store value.
-        If not specified, a column named 'value' will be used. Shouldn't be used for POJO strategy.
-    -->
-    <valuePersistence class="org.apache.ignite.tests.pojos.Person"
-                      strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer">
-        <!--
-         Mapping from POJO field to Cassandra table column.
-
-         Note: required element
-
-         Attributes:
-           1) name         [required] - POJO field name
-           2) column       [optional] - Cassandra table column name. If not specified lowercase
-              POJO field name will be used.
-           3) static       [optional] - boolean flag which specifies that column is static within a given partition
-           4) index        [optional] - boolean flag specifying that secondary index should be created for the field
-           5) indexClass   [optional] - custom index java class name, in case you want to use custom index
-           6) indexOptions [optional] - custom index options
-        -->
-        <field name="personNumber" column="number" />
-        <field name="firstName" column="first_name" />
-        <field name="lastName" column="last_name" />
-        <field name="fullName" />
-        <field name="age" />
-        <field name="married" index="true"/>
-        <field name="height" />
-        <field name="weight" />
-        <field name="birthDate" column="birth_date" />
-        <field name="phones" />
-    </valuePersistence>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml
deleted file mode 100644
index f4210b8ba4844..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-5.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="pojo_test5">
-    <keyPersistence class="org.apache.ignite.tests.pojos.SimplePersonId" strategy="POJO"/>
-    <valuePersistence class="org.apache.ignite.tests.pojos.SimplePerson" strategy="POJO"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml
deleted file mode 100644
index 340f64615a82c..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/persistence-settings-6.xml
+++ /dev/null
@@ -1,174 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-Root container for persistence settings configuration.
-
-Note: required element
-
-Attributes:
-  1) keyspace [required] - keyspace for Cassandra tables which should be used to store key/value pairs
-  2) table    [required] - Cassandra table which should be used to store key/value pairs
-  3) ttl      [optional] - expiration period for the table rows (in seconds)
--->
-<persistence keyspace="test1" table="pojo_test6" ttl="86400">
-    <!--
-    Cassandra keyspace options which should be used to create provided keyspace if it doesn't exist.
-
-    Note: optional element
-    -->
-    <keyspaceOptions>
-        REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 3}
-        AND DURABLE_WRITES = true
-    </keyspaceOptions>
-
-    <!--
-    Cassandra table options which should be used to create provided table if it doesn't exist.
-
-    Note: optional element
-    -->
-    <tableOptions>
-        comment = 'A most excellent and useful table'
-        AND read_repair_chance = 0.2
-    </tableOptions>
-
-    <!--
-    Persistent settings for Ignite cache keys.
-
-    Note: required element
-
-    Attributes:
-      1) class      [required] - java class name for Ignite cache key
-      2) strategy   [required] - one of three possible persistent strategies which controls how object
-        should be persisted/loaded to/from Cassandra table:
-            a) PRIMITIVE - stores key value as is, by mapping it to Cassandra table column with corresponding type.
-                Should be used only for simple java types (int, long, String, double, Date) which could be mapped
-                to corresponding Cassandra types.
-            b) BLOB - stores key value as BLOB, by mapping it to Cassandra table column with blob type.
-                Could be used for any java object. Conversion of java object to BLOB is handled by "serializer"
-                which could be specified in serializer attribute (see below).
-            c) POJO - stores each field of an object as a column having corresponding type in Cassandra table.
-                Provides ability to utilize Cassandra secondary indexes for object fields.
-      3) serializer [optional] - specifies serializer class for BLOB strategy. Shouldn't be used for PRIMITIVE and
-        POJO strategies. Available implementations:
-            a) org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-                serialization framework
-            b) org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo
-                serialization framework
-      4) column     [optional] - specifies the column name where the key should be stored for PRIMITIVE and BLOB
-        strategies. If not specified, a column named 'key' will be used. Shouldn't be used for POJO strategy.
-    -->
-    <keyPersistence class="org.apache.ignite.tests.pojos.SimplePersonId" strategy="POJO">
-        <!--
-        Partition key fields if POJO strategy is used.
-
-        Note: optional element, only required for POJO strategy in case you want to manually specify
-            the mapping from POJO fields to Cassandra columns, instead of relying on dynamic discovery of
-            POJO fields and mapping them to Cassandra columns with the same names.
-        -->
-        <partitionKey>
-            <!--
-             Mapping from POJO field to Cassandra table column.
-
-             Note: required element
-
-             Attributes:
-               1) name   [required] - POJO field name
-               2) column [optional] - Cassandra table column name. If not specified lowercase
-                  POJO field name will be used.
-            -->
-            <field name="companyCode" column="company" />
-            <field name="departmentCode" column="department" />
-        </partitionKey>
-
-        <!--
-        Cluster key fields if POJO strategy is used.
-
-        Note: optional element, only required for POJO strategy in case you want to manually specify
-            the mapping from POJO fields to Cassandra columns, instead of relying on dynamic discovery of
-            POJO fields and mapping them to Cassandra columns with the same names.
-        -->
-        <clusterKey>
-            <!--
-             Mapping from POJO field to Cassandra table column.
-
-             Note: required element
-
-             Attributes:
-               1) name   [required] - POJO field name
-               2) column [optional] - Cassandra table column name. If not specified lowercase
-                  POJO field name will be used.
-               3) sort   [optional] - specifies sort order (**asc** or **desc**)
-            -->
-            <field name="personNum" column="number" sort="desc"/>
-        </clusterKey>
-    </keyPersistence>
-
-    <!--
-    Persistent settings for Ignite cache values.
-
-    Note: required element
-
-    Attributes:
-      1) class      [required] - java class name for Ignite cache value
-      2) strategy   [required] - one of three possible persistent strategies which controls how object
-        should be persisted/loaded to/from Cassandra table:
-            a) PRIMITIVE - stores value as is, by mapping it to Cassandra table column with corresponding type.
-                Should be used only for simple java types (int, long, String, double, Date) which could be mapped
-                to corresponding Cassandra types.
-            b) BLOB - stores value as BLOB, by mapping it to Cassandra table column with blob type.
-                Could be used for any java object. Conversion of java object to BLOB is handled by "serializer"
-                which could be specified in serializer attribute (see below).
-            c) POJO - stores each field of an object as a column having corresponding type in Cassandra table.
-                Provides ability to utilize Cassandra secondary indexes for object fields.
-      3) serializer [optional] - specifies serializer class for BLOB strategy. Shouldn't be used for PRIMITIVE and
-        POJO strategies. Available implementations:
-            a) org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer - uses standard Java
-                serialization framework
-            b) org.apache.ignite.cache.store.cassandra.serializer.KryoSerializer - uses Kryo
-                serialization framework
-      4) column     [optional] - specifies the column name where the value should be stored for PRIMITIVE and BLOB
-        strategies. If not specified, a column named 'value' will be used. Shouldn't be used for POJO strategy.
-    -->
-    <valuePersistence class="org.apache.ignite.tests.pojos.SimplePerson"
-                      strategy="POJO"
-                      serializer="org.apache.ignite.cache.store.cassandra.serializer.JavaSerializer">
-        <!--
-         Mapping from POJO field to Cassandra table column.
-
-         Note: required element
-
-         Attributes:
-           1) name         [required] - POJO field name
-           2) column       [optional] - Cassandra table column name. If not specified lowercase
-              POJO field name will be used.
-           3) static       [optional] - boolean flag which specifies that column is static within a given partition
-           4) index        [optional] - boolean flag specifying that secondary index should be created for the field
-           5) indexClass   [optional] - custom index java class name, in case you want to use custom index
-           6) indexOptions [optional] - custom index options
-        -->
-        <field name="personNum" column="number" />
-        <field name="firstName" column="first_name" />
-        <field name="lastName" column="last_name" />
-        <field name="age" />
-        <field name="married" index="true"/>
-        <field name="height" />
-        <field name="weight" />
-        <field name="birthDate" column="birth_date" />
-        <field name="phones" />
-    </valuePersistence>
-</persistence>
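
KeyValuePersistenceSettings can also be built from the raw XML passed as a plain string; the remote client and server configs below do exactly that through a CDATA section. A minimal sketch, assuming the usual getKeyspace()/getTable() accessors are available:

    import org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings;

    public class InlineSettingsSketch {
        public static void main(String[] args) {
            String xml =
                "<persistence keyspace=\"test1\" table=\"pojo_test6\" ttl=\"86400\">" +
                "    <keyPersistence class=\"org.apache.ignite.tests.pojos.SimplePersonId\" strategy=\"POJO\"/>" +
                "    <valuePersistence class=\"org.apache.ignite.tests.pojos.SimplePerson\" strategy=\"POJO\"/>" +
                "</persistence>";

            // The constructor parses and validates the settings; keyspace and table come from the root element.
            KeyValuePersistenceSettings settings = new KeyValuePersistenceSettings(xml);

            System.out.println(settings.getKeyspace() + "." + settings.getTable());
        }
    }
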
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml
deleted file mode 100644
index c761e1c357191..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/pojo/product.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="product">
-    <keyPersistence class="java.lang.Long" column="id" strategy="PRIMITIVE" />
-    <valuePersistence class="org.apache.ignite.tests.pojos.Product" strategy="POJO" />
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
deleted file mode 100644
index 99091fa8d274f..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-config.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd">
-
-    <!-- Cassandra connection settings -->
-    <import resource="classpath:org/apache/ignite/tests/cassandra/connection-settings.xml" />
-
-    <!-- Persistence settings for 'cache1' -->
-    <bean id="cache1_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml" />
-    </bean>
-
-    <!-- Persistence settings for 'cache2' -->
-    <bean id="cache2_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="org.springframework.core.io.Resource"
-                         value="classpath:org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml" />
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-                <!-- Configuring persistence for "cache2" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache2_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-            </list>
-        </property>
-
-        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <!--
-                        Ignite provides several options for automatic discovery that can be used
-                        instead of static IP based discovery. For information on all options refer
-                        to our documentation: http://apacheignite.readme.io/docs/cluster-config
-                    -->
-                    <!-- Uncomment static IP finder to enable static-based discovery of initial nodes. -->
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                    <!--<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">-->
-                        <property name="addresses">
-                            <list>
-                                <!-- In distributed environment, replace with actual host IP address. -->
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
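
For reference, the Spring wiring above has a straightforward programmatic equivalent; the following is a sketch only, using the bean names from the XML and the setters implied by the <property> elements:

    import java.util.Collections;
    import org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory;
    import org.apache.ignite.configuration.CacheConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
    import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;

    public class Cache1ConfigSketch {
        public static IgniteConfiguration configuration() {
            // Cassandra-backed store for "cache1", resolved via Spring bean names at runtime.
            CassandraCacheStoreFactory<Long, Long> store = new CassandraCacheStoreFactory<>();
            store.setDataSourceBean("cassandraAdminDataSource");
            store.setPersistenceSettingsBean("cache1_persistence_settings");

            CacheConfiguration<Long, Long> cache1 = new CacheConfiguration<>("cache1");
            cache1.setReadThrough(true);
            cache1.setWriteThrough(true);
            cache1.setCacheStoreFactory(store);

            // Static IP finder matching the discovery section of the XML above.
            TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
            ipFinder.setAddresses(Collections.singletonList("127.0.0.1:47500..47509"));

            TcpDiscoverySpi discovery = new TcpDiscoverySpi();
            discovery.setIpFinder(ipFinder);

            return new IgniteConfiguration()
                .setCacheConfiguration(cache1)
                .setDiscoverySpi(discovery);
        }
    }
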
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
deleted file mode 100644
index 5b5bb597aee82..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
+++ /dev/null
@@ -1,99 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd
-        http://www.springframework.org/schema/util
-        http://www.springframework.org/schema/util/spring-util.xsd">
-
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
-        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
-            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
-        </constructor-arg>
-    </bean>
-
-    <util:list id="contactPoints" value-type="java.lang.String">
-        <value>cassandra-node-1.abc.com</value>
-        <value>cassandra-node-2.abc.com</value>
-    </util:list>
-
-    <bean id="cassandraAdminDataSource" class="org.apache.ignite.cache.store.cassandra.datasource.DataSource">
-        <property name="user" value="cassandra"/>
-        <property name="password" value="cassandra"/>
-        <property name="contactPoints" ref="contactPoints"/>
-        <property name="readConsistency" value="ONE"/>
-        <property name="writeConsistency" value="ONE"/>
-        <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
-    </bean>
-
-    <bean id="cache1_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="primitive_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" column="key"/>
-    <valuePersistence class="java.lang.Long" strategy="PRIMITIVE" column="value"/>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-        <!-- Turn on client mode -->
-        <property name="clientMode" value="true"/>
-
-        <property name="metricsLogFrequency" value="0"/>
-
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-            </list>
-        </property>
-
-        <!-- Configuring remote ignite cluster connections -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                        <property name="addresses">
-                            <list>
-                                <value>ignite-node-1</value>
-                                <value>ignite-node-2</value>
-                                <value>ignite-node-3</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
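
The cassandraAdminDataSource bean above maps directly onto setters of org.apache.ignite.cache.store.cassandra.datasource.DataSource. A minimal sketch of the programmatic equivalent, reusing the placeholder hosts and credentials from the XML (setter names are inferred from the <property> names):

    import com.datastax.driver.core.policies.RoundRobinPolicy;
    import com.datastax.driver.core.policies.TokenAwarePolicy;
    import org.apache.ignite.cache.store.cassandra.datasource.DataSource;

    public class DataSourceSketch {
        public static DataSource cassandraAdminDataSource() {
            DataSource ds = new DataSource();
            ds.setUser("cassandra");
            ds.setPassword("cassandra");
            ds.setContactPoints("cassandra-node-1.abc.com", "cassandra-node-2.abc.com");
            ds.setReadConsistency("ONE");
            ds.setWriteConsistency("ONE");
            ds.setLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()));
            return ds;
        }
    }
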
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
deleted file mode 100644
index e8852c020a0d1..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/ignite-remote-server-config.xml
+++ /dev/null
@@ -1,110 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="http://www.springframework.org/schema/beans
-        http://www.springframework.org/schema/beans/spring-beans.xsd
-        http://www.springframework.org/schema/util
-        http://www.springframework.org/schema/util/spring-util.xsd">
-
-    <!-- Cassandra connection settings -->
-    <bean id="loadBalancingPolicy" class="com.datastax.driver.core.policies.TokenAwarePolicy">
-        <constructor-arg type="com.datastax.driver.core.policies.LoadBalancingPolicy">
-            <bean class="com.datastax.driver.core.policies.RoundRobinPolicy"/>
-        </constructor-arg>
-    </bean>
-
-    <util:list id="contactPoints" value-type="java.lang.String">
-        <value>cassandra-node-1.abc.com</value>
-        <value>cassandra-node-2.abc.com</value>
-    </util:list>
-
-    <bean id="cassandraAdminDataSource" class="org.apache.ignite.cache.store.cassandra.datasource.DataSource">
-        <property name="user" value="cassandra"/>
-        <property name="password" value="cassandra"/>
-        <property name="contactPoints" ref="contactPoints"/>
-        <property name="readConsistency" value="ONE"/>
-        <property name="writeConsistency" value="ONE"/>
-        <property name="loadBalancingPolicy" ref="loadBalancingPolicy"/>
-    </bean>
-
-    <!-- Persistence settings for 'cache1' -->
-    <bean id="cache1_persistence_settings" class="org.apache.ignite.cache.store.cassandra.persistence.KeyValuePersistenceSettings">
-        <constructor-arg type="java.lang.String">
-            <value><![CDATA[
-<persistence keyspace="test1" table="primitive_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" column="key"/>
-    <valuePersistence class="java.lang.Long" strategy="PRIMITIVE" column="value"/>
-</persistence>]]>
-            </value>
-        </constructor-arg>
-    </bean>
-
-    <!-- Ignite configuration -->
-    <bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
-
-        <property name="cacheConfiguration">
-            <list>
-                <!-- Partitioned cache example configuration (Atomic mode). -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="atomicityMode" value="ATOMIC"/>
-                    <property name="backups" value="1"/>
-                </bean>
-
-                <!-- Configuring persistence for "cache1" cache -->
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache1"/>
-                    <property name="readThrough" value="true"/>
-                    <property name="writeThrough" value="true"/>
-                    <property name="cacheStoreFactory">
-                        <bean class="org.apache.ignite.cache.store.cassandra.CassandraCacheStoreFactory">
-                            <property name="dataSourceBean" value="cassandraAdminDataSource"/>
-                            <property name="persistenceSettingsBean" value="cache1_persistence_settings"/>
-                        </bean>
-                    </property>
-                </bean>
-
-            </list>
-        </property>
-
-        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
-        <property name="discoverySpi">
-            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
-                <property name="ipFinder">
-                    <!--
-                        Ignite provides several options for automatic discovery that can be used
-                        instead of static IP based discovery. For information on all options refer
-                        to our documentation: http://apacheignite.readme.io/docs/cluster-config
-                    -->
-                    <!-- Uncomment static IP finder to enable static-based discovery of initial nodes. -->
-                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
-                    <!--<bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder">-->
-                        <property name="addresses">
-                            <list>
-                                <!-- In distributed environment, replace with actual host IP address. -->
-                                <value>127.0.0.1:47500..47509</value>
-                            </list>
-                        </property>
-                    </bean>
-                </property>
-            </bean>
-        </property>
-    </bean>
-</beans>
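
Both remote configs are normally consumed through Ignition.start(...): a server node is started with ignite-remote-server-config.xml, after which a client node started with ignite-remote-client-config.xml reads and writes 'cache1' with read/write-through against the primitive_test1 table. A minimal sketch of the client side (the config path is an assumption; pass whatever path the file is deployed under):

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteCache;
    import org.apache.ignite.Ignition;

    public class RemoteClientSketch {
        public static void main(String[] args) {
            // Assumes server nodes were already started with ignite-remote-server-config.xml.
            try (Ignite client = Ignition.start("ignite-remote-client-config.xml")) {
                IgniteCache<Long, Long> cache = client.cache("cache1");

                cache.put(1L, 42L);        // write-through: row persisted into test1.primitive_test1
                Long val = cache.get(1L);  // read-through: loaded from Cassandra on a cache miss

                System.out.println("cache1[1] = " + val);
            }
        }
    }
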
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
deleted file mode 100644
index 27882e631f96c..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="primitive_test1">
-    <keyPersistence class="java.lang.Long" strategy="PRIMITIVE" column="key"/>
-    <valuePersistence class="java.lang.Long" strategy="PRIMITIVE" column="value"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml b/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml
deleted file mode 100644
index 62c1f4a6004cd..0000000000000
--- a/modules/cassandra/store/src/test/resources/org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<persistence keyspace="test1" table="primitive_test2">
-    <keyPersistence class="java.lang.String" strategy="PRIMITIVE" column="key"/>
-    <valuePersistence class="java.lang.String" strategy="PRIMITIVE" column="value"/>
-</persistence>
diff --git a/modules/cassandra/store/src/test/resources/tests.properties b/modules/cassandra/store/src/test/resources/tests.properties
deleted file mode 100644
index b11f2c847b27f..0000000000000
--- a/modules/cassandra/store/src/test/resources/tests.properties
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Number of elements for CacheStore bulk operations: loadAll, writeAll, deleteAll
-bulk.operation.size=100
-
-# Number of products per transaction
-transaction.products.count=2
-
-# Number of orders per transaction
-transaction.orders.count=10
-
-# Year to use for generating new orders
-orders.year=
-
-# Month to use for generating new orders
-orders.month=
-
-# Day of month to use for generating new orders
-orders.day=
-
-# ----- Load tests settings -----
-
-# Ignite cache to be used by load tests
-load.tests.cache.name=cache1
-#load.tests.cache.name=cache3
-
-# Number of simultaneous threads for each load test
-load.tests.threads.count=10
-
-# Warm up period (in milliseconds) for each load test before starting any measurements
-load.tests.warmup.period=180000
-
-# Time for each load test execution excluding warm up period (in milliseconds)
-load.tests.execution.time=300000
-
-# Latency (in milliseconds) between two sequential requests to Cassandra/Ignite
-load.tests.requests.latency=0
-
-# Resource specifying persistence settings for all load tests
-load.tests.persistence.settings=org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml
-#load.tests.persistence.settings=org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml
-
-# Resource specifying Ignite configuration for all load tests
-load.tests.ignite.config=org/apache/ignite/tests/persistence/primitive/ignite-remote-client-config.xml
-
-# Key generator for load tests
-load.tests.key.generator=org.apache.ignite.tests.load.LongGenerator
-#load.tests.key.generator=org.apache.ignite.tests.load.PersonIdGenerator
-
-# Value generator for load tests
-load.tests.value.generator=org.apache.ignite.tests.load.LongGenerator
-#load.tests.value.generator=org.apache.ignite.tests.load.PersonGenerator
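
These keys configure the load-test harness invoked by the scripts below; the classes that consume them are outside this file, so the following is only a hypothetical sketch of reading the settings from the classpath:

    import java.io.InputStream;
    import java.util.Properties;

    public class TestsPropertiesSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();

            try (InputStream in = TestsPropertiesSketch.class.getClassLoader()
                    .getResourceAsStream("tests.properties")) {
                props.load(in);
            }

            int bulkSize = Integer.parseInt(props.getProperty("bulk.operation.size", "100"));
            String cacheName = props.getProperty("load.tests.cache.name", "cache1");

            System.out.println("bulk=" + bulkSize + ", cache=" + cacheName);
        }
    }
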
diff --git a/modules/cassandra/store/src/test/scripts/cassandra-load-tests.bat b/modules/cassandra/store/src/test/scripts/cassandra-load-tests.bat
deleted file mode 100644
index c64de1e20b9f8..0000000000000
--- a/modules/cassandra/store/src/test/scripts/cassandra-load-tests.bat
+++ /dev/null
@@ -1,41 +0,0 @@
-::
-:: Licensed to the Apache Software Foundation (ASF) under one or more
-:: contributor license agreements.  See the NOTICE file distributed with
-:: this work for additional information regarding copyright ownership.
-:: The ASF licenses this file to You under the Apache License, Version 2.0
-:: (the "License"); you may not use this file except in compliance with
-:: the License.  You may obtain a copy of the License at
-::
-::      http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing, software
-:: distributed under the License is distributed on an "AS IS" BASIS,
-:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-:: See the License for the specific language governing permissions and
-:: limitations under the License.
-::
-
-echo off
-
-echo.
-
-set TESTS_CLASSPATH="%~dp0lib\*;%~dp0settings"
-
-call %~dp0jvm-opts.bat %*
-
-call java %JVM_OPTS% -cp "%TESTS_CLASSPATH%" "org.apache.ignite.tests.CassandraDirectPersistenceLoadTest"
-
-if %errorLevel% NEQ 0 (
-    echo.
-    echo --------------------------------------------------------------------------------
-    echo [ERROR] Tests execution failed
-    echo --------------------------------------------------------------------------------
-    echo.
-    exit /b %errorLevel%
-)
-
-echo.
-echo --------------------------------------------------------------------------------
-echo [INFO] Tests execution succeeded
-echo --------------------------------------------------------------------------------
-echo.
diff --git a/modules/cassandra/store/src/test/scripts/cassandra-load-tests.sh b/modules/cassandra/store/src/test/scripts/cassandra-load-tests.sh
deleted file mode 100644
index dda25dc97e91d..0000000000000
--- a/modules/cassandra/store/src/test/scripts/cassandra-load-tests.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-TESTS_ROOT=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))
-TESTS_CLASSPATH="$TESTS_ROOT/lib/*:$TESTS_ROOT/settings"
-
-. $TESTS_ROOT/jvm-opt.sh $@
-
-java $JVM_OPTS -cp "$TESTS_CLASSPATH" "org.apache.ignite.tests.CassandraDirectPersistenceLoadTest"
-
-if [ $? -ne 0 ]; then
-    echo
-    echo "--------------------------------------------------------------------------------"
-    echo "[ERROR] Tests execution failed"
-    echo "--------------------------------------------------------------------------------"
-    echo
-    exit 1
-fi
-
-echo
-echo "--------------------------------------------------------------------------------"
-echo "[INFO] Tests execution succeed"
-echo "--------------------------------------------------------------------------------"
-echo
diff --git a/modules/cassandra/store/src/test/scripts/ignite-load-tests.bat b/modules/cassandra/store/src/test/scripts/ignite-load-tests.bat
deleted file mode 100644
index 5a45ffcb6a758..0000000000000
--- a/modules/cassandra/store/src/test/scripts/ignite-load-tests.bat
+++ /dev/null
@@ -1,41 +0,0 @@
-::
-:: Licensed to the Apache Software Foundation (ASF) under one or more
-:: contributor license agreements.  See the NOTICE file distributed with
-:: this work for additional information regarding copyright ownership.
-:: The ASF licenses this file to You under the Apache License, Version 2.0
-:: (the "License"); you may not use this file except in compliance with
-:: the License.  You may obtain a copy of the License at
-::
-::      http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing, software
-:: distributed under the License is distributed on an "AS IS" BASIS,
-:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-:: See the License for the specific language governing permissions and
-:: limitations under the License.
-::
-
-echo off
-
-echo.
-
-set TESTS_CLASSPATH="%~dp0\lib*;%~dp0settings"
-
-call %~dp0jvm-opts.bat %*
-
-call java %JVM_OPTS% -cp "%TESTS_CLASSPATH%" "org.apache.ignite.tests.IgnitePersistentStoreLoadTest"
-
-if %errorLevel% NEQ 0 (
-    echo.
-    echo --------------------------------------------------------------------------------
-    echo [ERROR] Tests execution failed
-    echo --------------------------------------------------------------------------------
-    echo.
-    exit /b %errorLevel%
-)
-
-echo.
-echo --------------------------------------------------------------------------------
-echo [INFO] Tests execution succeeded
-echo --------------------------------------------------------------------------------
-echo.
diff --git a/modules/cassandra/store/src/test/scripts/ignite-load-tests.sh b/modules/cassandra/store/src/test/scripts/ignite-load-tests.sh
deleted file mode 100644
index c2e1a419af7bf..0000000000000
--- a/modules/cassandra/store/src/test/scripts/ignite-load-tests.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-TESTS_ROOT=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))
-TESTS_CLASSPATH="$TESTS_ROOT/lib/*:$TESTS_ROOT/settings"
-
-. $TESTS_ROOT/jvm-opt.sh $@
-
-java $JVM_OPTS -cp "$TESTS_CLASSPATH" "org.apache.ignite.tests.IgnitePersistentStoreLoadTest"
-
-if [ $? -ne 0 ]; then
-    echo
-    echo "--------------------------------------------------------------------------------"
-    echo "[ERROR] Tests execution failed"
-    echo "--------------------------------------------------------------------------------"
-    echo
-    exit 1
-fi
-
-echo
-echo "--------------------------------------------------------------------------------"
-echo "[INFO] Tests execution succeed"
-echo "--------------------------------------------------------------------------------"
-echo
diff --git a/modules/cassandra/store/src/test/scripts/jvm-opt.sh b/modules/cassandra/store/src/test/scripts/jvm-opt.sh
deleted file mode 100644
index d4e70f005e593..0000000000000
--- a/modules/cassandra/store/src/test/scripts/jvm-opt.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-JVM_OPTS="-Xms6g -Xmx6g -XX:+AggressiveOpts -XX:MaxMetaspaceSize=256m"
-JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseTLAB -XX:NewSize=128m -XX:MaxNewSize=768m"
-JVM_OPTS="$JVM_OPTS -Xss16m"
diff --git a/modules/cassandra/store/src/test/scripts/jvm-opts.bat b/modules/cassandra/store/src/test/scripts/jvm-opts.bat
deleted file mode 100644
index 1937efbc9ed57..0000000000000
--- a/modules/cassandra/store/src/test/scripts/jvm-opts.bat
+++ /dev/null
@@ -1,24 +0,0 @@
-::
-:: Licensed to the Apache Software Foundation (ASF) under one or more
-:: contributor license agreements.  See the NOTICE file distributed with
-:: this work for additional information regarding copyright ownership.
-:: The ASF licenses this file to You under the Apache License, Version 2.0
-:: (the "License"); you may not use this file except in compliance with
-:: the License.  You may obtain a copy of the License at
-::
-::      http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing, software
-:: distributed under the License is distributed on an "AS IS" BASIS,
-:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-:: See the License for the specific language governing permissions and
-:: limitations under the License.
-::
-
-echo off
-
-echo.
-
-set "JVM_OPTS=-Xms6g -Xmx6g -XX:+AggressiveOpts -XX:MaxMetaspaceSize=256m"
-set "JVM_OPTS=%JVM_OPTS% -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+UseTLAB -XX:NewSize=128m -XX:MaxNewSize=768m"
-set "JVM_OPTS=%JVM_OPTS% -Xss16m"
diff --git a/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat b/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat
deleted file mode 100644
index d538ea425e54e..0000000000000
--- a/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.bat
+++ /dev/null
@@ -1,41 +0,0 @@
-::
-:: Licensed to the Apache Software Foundation (ASF) under one or more
-:: contributor license agreements.  See the NOTICE file distributed with
-:: this work for additional information regarding copyright ownership.
-:: The ASF licenses this file to You under the Apache License, Version 2.0
-:: (the "License"); you may not use this file except in compliance with
-:: the License.  You may obtain a copy of the License at
-::
-::      http://www.apache.org/licenses/LICENSE-2.0
-::
-:: Unless required by applicable law or agreed to in writing, software
-:: distributed under the License is distributed on an "AS IS" BASIS,
-:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-:: See the License for the specific language governing permissions and
-:: limitations under the License.
-::
-
-echo off
-
-echo.
-
-set TESTS_CLASSPATH="%~dp0\lib*;%~dp0settings"
-
-call %~dp0jvm-opts.bat %*
-
-call java %JVM_OPTS% -cp "%TESTS_CLASSPATH%" "org.apache.ignite.tests.LoadTestsCassandraArtifactsCreator"
-
-if %errorLevel% NEQ 0 (
-    echo.
-    echo --------------------------------------------------------------------------------
-    echo [ERROR] Failed to recreate Cassandra artifacts
-    echo --------------------------------------------------------------------------------
-    echo.
-    exit /b %errorLevel%
-)
-
-echo.
-echo --------------------------------------------------------------------------------
-echo [INFO] Cassandra artifacts were successfully recreated
-echo --------------------------------------------------------------------------------
-echo.
diff --git a/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh b/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh
deleted file mode 100644
index b0f99be218b8e..0000000000000
--- a/modules/cassandra/store/src/test/scripts/recreate-cassandra-artifacts.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-TESTS_ROOT=$(readlink -m $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ))
-TESTS_CLASSPATH="$TESTS_ROOT/lib/*:$TESTS_ROOT/settings"
-
-. $TESTS_ROOT/jvm-opt.sh $@
-
-java $JVM_OPTS -cp "$TESTS_CLASSPATH" "org.apache.ignite.tests.LoadTestsCassandraArtifactsCreator"
-
-if [ $? -ne 0 ]; then
-    echo
-    echo "--------------------------------------------------------------------------------"
-    echo "[ERROR] Failed to recreate Cassandra artifacts"
-    echo "--------------------------------------------------------------------------------"
-    echo
-    exit 1
-fi
-
-echo
-echo "--------------------------------------------------------------------------------"
-echo "[INFO] Cassandra artifacts were successfully recreated"
-echo "--------------------------------------------------------------------------------"
-echo
diff --git a/modules/dev-utils/ignite-modules-test/build.gradle b/modules/dev-utils/ignite-modules-test/build.gradle
index 289a76756c2ac..4d7ee5b4f31dc 100644
--- a/modules/dev-utils/ignite-modules-test/build.gradle
+++ b/modules/dev-utils/ignite-modules-test/build.gradle
@@ -107,12 +107,6 @@ dependencies {
     */
 
     // compile group: 'org.apache.ignite', name: 'ignite-compress', version: ignVer
-
-    compile (group: 'org.apache.ignite', name: 'ignite-cassandra-store', version: ignVer) {
-        exclude group: 'org.apache.lucene'  // to remove "lucene-sandbox and analyzers-commons" package conflicts.
-
-        exclude group: 'org.apache.ignite', module: 'ignite-spring'
-    }
 }
 
 test {
diff --git a/pom.xml b/pom.xml
index d13736c3f3ee3..de4d928f2dc82 100644
--- a/pom.xml
+++ b/pom.xml
@@ -61,7 +61,6 @@
         <module>modules/zookeeper</module>
         <module>modules/web/ignite-appserver-test</module>
         <module>modules/web/ignite-websphere-test</module>
-        <module>modules/cassandra</module>
         <module>modules/kubernetes</module>
         <module>modules/sqlline</module>
         <module>modules/opencensus</module>