From 4dedb1245288ba9c25b0aa7fe0a244f0864fdb98 Mon Sep 17 00:00:00 2001 From: shirady <57721533+shirady@users.noreply.github.com> Date: Sun, 9 Apr 2023 09:25:01 +0300 Subject: [PATCH 1/2] add rule prefer-const, turn on rule no-var, and turn off logical-assignment-operators rule in .eslintrc.js Signed-off-by: shirady <57721533+shirady@users.noreply.github.com> linter changes: 1. change var to let/const 2. change let to const where the variable is never reassigned Signed-off-by: shirady <57721533+shirady@users.noreply.github.com> --- .eslintrc.js | 14 +- src/agent/agent.js | 6 +- src/agent/agent_cli.js | 96 ++++++------ src/agent/agent_wrap.js | 2 +- .../block_store_services/block_store_azure.js | 4 +- .../block_store_services/block_store_base.js | 4 +- .../block_store_services/block_store_fs.js | 18 +-- .../block_store_google.js | 7 +- .../block_store_services/block_store_mongo.js | 2 +- .../block_store_services/block_store_s3.js | 4 +- src/core/nsfs.js | 2 +- src/deploy/NVA_build/mongo_init_rs.js | 6 +- src/deploy/ec2_wrapper.js | 98 ++++++------ src/deploy/kubernetes_functions.js | 2 +- src/endpoint/blob/blob_rest.js | 2 +- src/endpoint/blob/blob_utils.js | 8 +- src/endpoint/blob/ops/blob_put_blob.js | 2 +- src/endpoint/endpoint.js | 2 +- .../s3/ops/s3_put_object_retention.js | 2 +- src/endpoint/s3/s3_rest.js | 4 +- src/endpoint/s3/s3_utils.js | 14 +- src/endpoint/sts/sts_rest.js | 2 +- src/hosted_agents/hosted_agents.js | 6 +- src/rpc/ice.js | 140 +++++++++--------- src/rpc/rpc.js | 10 +- src/rpc/rpc_base_conn.js | 4 +- src/rpc/rpc_http.js | 12 +- src/rpc/rpc_http_server.js | 10 +- src/rpc/rpc_n2n.js | 14 +- src/rpc/rpc_n2n_agent.js | 16 +- src/rpc/rpc_ntcp.js | 16 +- src/rpc/rpc_ntcp_server.js | 6 +- src/rpc/rpc_nudp.js | 22 +-- src/rpc/rpc_schema.js | 4 +- src/rpc/rpc_tcp.js | 6 +- src/rpc/rpc_tcp_server.js | 20 +-- src/rpc/stun.js | 102 ++++++------- src/sdk/dedup_options.js | 4 +- src/sdk/namespace_blob.js | 2 +- src/sdk/namespace_fs.js | 22 +-- src/sdk/namespace_merge.js | 12 +- src/sdk/namespace_multipart.js | 8 +- src/sdk/object_io.js | 2 +- src/sdk/object_sdk.js | 2 +- .../analytic_services/activity_log_store.js | 2 +- .../prometheus_reports/noobaa_core_report.js | 2 +- .../bg_services/bucket_chunks_builder.js | 2 +- src/server/bg_services/cluster_hb.js | 12 +- src/server/bg_services/cluster_master.js | 2 +- src/server/bg_services/db_cleaner.js | 2 +- src/server/bg_services/md_aggregator.js | 32 ++-- .../bg_services/replication_log_parser.js | 2 +- src/server/bg_services/replication_scanner.js | 4 +- src/server/bg_workers.js | 2 +- src/server/common_services/auth_server.js | 46 +++--- .../common_services/server_inter_process.js | 14 +- src/server/func_services/func_server.js | 2 +- src/server/index.js | 8 +- src/server/license_info.js | 2 +- src/server/node_services/host_server.js | 4 +- src/server/node_services/node_allocator.js | 18 +-- src/server/node_services/nodes_monitor.js | 38 ++--- src/server/notifications/alerts_log_store.js | 8 +- src/server/notifications/dispatcher.js | 10 +- src/server/notifications/event_server.js | 12 +- src/server/object_services/md_store.js | 2 +- src/server/object_services/object_server.js | 8 +- src/server/server_rpc.js | 46 +++--- src/server/system_services/account_server.js | 54 +++---- .../system_services/aws_usage_metering.js | 2 +- src/server/system_services/bucket_server.js | 62 ++++---- src/server/system_services/cluster_server.js | 85 ++++++----- src/server/system_services/objects/quota.js | 8 +- src/server/system_services/pool_server.js | 40 ++---
.../system_services/stats_aggregator.js | 34 ++--- src/server/system_services/system_server.js | 52 +++---- src/server/system_services/system_store.js | 44 +++--- src/server/system_services/tier_server.js | 27 ++-- src/server/utils/clustering_utils.js | 48 +++--- src/server/utils/mongo_ctrl.js | 44 +++--- src/server/utils/server_diagnostics.js | 2 +- src/server/utils/supervisor_ctrl.js | 24 +-- src/server/web_server.js | 28 ++-- .../framework/consolidate_test_reports.js | 2 +- src/test/framework/convert.js | 2 +- src/test/framework/report.js | 2 +- src/test/lambda/delete_backup_file_func.js | 8 +- src/test/lambda/denial_of_service_func.js | 16 +- src/test/lambda/word_count_func.js | 30 ++-- src/test/pipeline/account_test.js | 10 +- src/test/pipeline/dataset.js | 26 ++-- .../namespace_cache_range_read_test.js | 14 +- src/test/pipeline/namespace_test.js | 12 +- src/test/pipeline/quota_test.js | 2 +- src/test/pipeline/system_config.js | 8 +- src/test/qa/agents_matrix.js | 4 +- src/test/qa/cloud_test.js | 2 +- src/test/qa/data_availability_test.js | 14 +- src/test/qa/data_resiliency_test.js | 10 +- src/test/qa/load.js | 12 +- src/test/qa/rebuild_replicas_test.js | 22 +-- src/test/qa/reclaim_test.js | 16 +- src/test/qa/tests_report_summary.js | 2 +- src/test/scripts/ec_in_db.js | 8 +- .../ceph_s3_tests/test_ceph_s3.js | 2 +- src/test/system_tests/sanity_build_test.js | 4 +- src/test/system_tests/test_bucket_access.js | 2 +- .../test_bucket_lambda_triggers.js | 42 +++--- .../system_tests/test_bucket_placement.js | 20 +-- src/test/system_tests/test_build_chunks.js | 10 +- src/test/system_tests/test_cloud_pools.js | 50 +++---- src/test/system_tests/test_files_ul.js | 22 +-- src/test/system_tests/test_md_aggregator.js | 22 +-- src/test/system_tests/test_node_failure.js | 30 ++-- .../system_tests/test_s3_authentication.js | 44 +++--- src/test/system_tests/test_utils.js | 10 +- src/test/system_tests/upgradeonly.js | 8 +- src/test/unit_tests/coretest.js | 2 +- .../unit_tests/test_agent_blocks_reclaimer.js | 2 +- .../unit_tests/test_bucket_replication.js | 44 +++--- src/test/unit_tests/test_bucketspace.js | 28 ++-- .../unit_tests/test_bucketspace_versioning.js | 90 +++++------ src/test/unit_tests/test_chunk_coder.js | 6 +- src/test/unit_tests/test_debug_module.js | 50 +++---- src/test/unit_tests/test_encryption.js | 26 ++-- src/test/unit_tests/test_fs_utils.js | 8 +- src/test/unit_tests/test_keys_lock.js | 20 +-- src/test/unit_tests/test_lifecycle.js | 2 +- src/test/unit_tests/test_linked_list.js | 22 +-- src/test/unit_tests/test_lru.js | 8 +- src/test/unit_tests/test_mapper.js | 2 +- src/test/unit_tests/test_mdsequence.js | 2 +- src/test/unit_tests/test_namespace_cache.js | 36 ++--- src/test/unit_tests/test_namespace_fs.js | 10 +- src/test/unit_tests/test_node_allocator.js | 4 +- src/test/unit_tests/test_ns_list_objects.js | 2 +- src/test/unit_tests/test_object_io.js | 10 +- src/test/unit_tests/test_postgres_client.js | 6 +- src/test/unit_tests/test_prefetch.js | 10 +- src/test/unit_tests/test_range_stream.js | 4 +- src/test/unit_tests/test_rpc.js | 26 ++-- src/test/unit_tests/test_s3_bucket_policy.js | 3 +- src/test/unit_tests/test_s3_list_objects.js | 44 +++--- src/test/unit_tests/test_s3_ops.js | 10 +- src/test/unit_tests/test_sts.js | 98 ++++++------ src/test/unit_tests/test_wait_queue.js | 4 +- src/test/unit_tests/test_zip_utils.js | 8 +- src/test/unrelated/map_vs_object_benchmark.js | 28 ++-- src/test/unrelated/measure_bind_perf.js | 24 +-- .../spawn_lsof_issue_with_cluster.js | 16 +- 
src/test/unrelated/tcp_simultaneous_open.js | 24 +-- src/test/utils/agent_functions.js | 6 +- src/test/utils/basic_server_ops.js | 24 +-- src/test/utils/bucket_functions.js | 2 +- src/test/utils/cloud_functions.js | 4 +- src/test/utils/s3ops.js | 35 +++-- src/test/utils/server_functions.js | 2 +- src/tools/coding_speed.js | 6 +- src/tools/cpu_speed.js | 2 +- src/tools/events_generator.js | 10 +- src/tools/gridfs_stress.js | 2 +- src/tools/http_speed.js | 4 +- src/tools/md_blow.js | 2 +- src/tools/mem_grabber.js | 8 +- src/tools/mongo_profiler.js | 42 +++--- src/tools/mongodb_blow.js | 10 +- src/tools/mongodb_bucket_blow.js | 20 +-- src/tools/nbcat.js | 26 ++-- src/tools/ntcp_speed.js | 20 +-- src/tools/rpc_shell.js | 54 +++---- src/tools/s3cat.js | 36 ++--- src/tools/s3perf.js | 16 +- src/tools/s3select.js | 4 +- src/tools/stun_server.js | 12 +- src/upgrade/upgrade_manager.js | 6 +- .../NetStorageKit-Node-master/lib/api-auth.js | 4 +- .../lib/api-request-parser.js | 2 +- .../lib/api-request.js | 4 +- .../lib/netstorage.js | 10 +- src/util/addr_utils.js | 2 +- src/util/barrier.js | 14 +- src/util/base_diagnostics.js | 8 +- src/util/buffer_utils.js | 6 +- src/util/chunk_splitter.js | 4 +- src/util/chunk_stream.js | 6 +- src/util/cloud_utils.js | 6 +- src/util/console_wrapper.js | 10 +- src/util/dotenv.js | 36 ++--- src/util/frame_stream.js | 20 +-- src/util/fs_utils.js | 8 +- src/util/histogram.js | 18 +-- src/util/http_recorder.js | 4 +- src/util/ifconfig.js | 8 +- src/util/js_utils.js | 10 +- src/util/keys_lock.js | 2 +- src/util/linked_list.js | 20 +-- src/util/lru.js | 18 +-- src/util/lru_cache.js | 18 +-- src/util/mongo_client.js | 30 ++-- src/util/mongo_functions.js | 28 ++-- src/util/nb_native.js | 2 +- src/util/net_utils.js | 4 +- src/util/os_utils.js | 54 +++---- src/util/postgres_client.js | 16 +- src/util/rand_stream.js | 4 +- src/util/range_utils.js | 6 +- src/util/schema_utils.js | 2 +- src/util/size_utils.js | 38 ++--- src/util/slice_reader.js | 2 +- src/util/speedometer.js | 2 +- src/util/string_utils.js | 6 +- src/util/url_utils.js | 44 +++--- src/util/xml_utils.js | 4 +- 213 files changed, 1786 insertions(+), 1789 deletions(-) diff --git a/.eslintrc.js b/.eslintrc.js index e0827d0e91..8be9256342 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -169,6 +169,11 @@ module.exports = { // instead of expression (foo = function() {}) 'func-style': ['error', 'declaration', { allowArrowFunctions: true }], + 'prefer-const': ['error', { + 'destructuring': 'all', + 'ignoreReadBeforeAssign': false + }], + ////////////////////////////////////////////////////////////////////// // // // WARN // ////////////////////////////////////////////////////////////////////// @@ -295,9 +300,6 @@ module.exports = { // we do allow _name or name_ as identifiers 'no-underscore-dangle': 'off', - // prefer to use let/const instead of var - 'no-var': 'off', - // turn off todo/fixme comments - will grep it to a different report 'no-warning-comments': 'off', @@ -319,9 +321,6 @@ module.exports = { // prefer using arrow functions for callbacks, but too much to fix 'prefer-named-capture-group': 'off', - // we prefer using const, but too much to fix - 'prefer-const': 'off', - // we prefer using destructuring, but too much to fix 'prefer-destructuring': 'off', @@ -356,5 +355,8 @@ module.exports = { //Allow spacing between template tags and their literals 'template-tag-spacing': 'off', + + // we prefer not to adopt the logical assignment operators from ES2021 + 'logical-assignment-operators': 'off' } }; diff --git a/src/agent/agent.js b/src/agent/agent.js index 6462e24dab..cadd0930fc
100644 --- a/src/agent/agent.js +++ b/src/agent/agent.js @@ -116,7 +116,7 @@ class Agent { this.node_type = 'BLOCK_STORE_S3'; this.block_store = new BlockStoreS3(block_store_options); } else if (params.cloud_info.endpoint_type === 'AZURE') { - let connection_string = cloud_utils.get_azure_new_connection_string({ + const connection_string = cloud_utils.get_azure_new_connection_string({ endpoint: params.cloud_info.endpoint, access_key: params.cloud_info.access_keys.access_key, secret_key: params.cloud_info.access_keys.secret_key @@ -326,8 +326,8 @@ class Agent { address: this.base_address }); } - let sorted_new = _.sortBy(new_list, srv => srv.address); - let sorted_old = _.sortBy(this.servers, srv => srv.address); + const sorted_new = _.sortBy(new_list, srv => srv.address); + const sorted_old = _.sortBy(this.servers, srv => srv.address); if (_.isEqual(sorted_new, sorted_old)) return P.resolve(); this.servers = new_list; return this.agent_conf.update({ diff --git a/src/agent/agent_cli.js b/src/agent/agent_cli.js index 3404e4cf0e..29afb0b218 100644 --- a/src/agent/agent_cli.js +++ b/src/agent/agent_cli.js @@ -44,7 +44,7 @@ const hosts = {}; class AgentCLI { constructor(params) { this.params = params; - var rpc = api.new_rpc(); + const rpc = api.new_rpc(); this.client = rpc.new_client(); this.s3 = new S3Auth(); this.agents = {}; @@ -78,7 +78,7 @@ class AgentCLI { * */ init() { - var self = this; + const self = this; if (self.params.access_key) { self.params.access_key = self.params.access_key.toString(); } @@ -134,20 +134,20 @@ class AgentCLI { self.params.all_storage_paths = mount_points; if (self.params.cleanup) { return P.all(_.map(self.params.all_storage_paths, storage_path_info => { - var storage_path = storage_path_info.mount; - var path_modification = storage_path.replace('/noobaa_storage/', '') + const storage_path = storage_path_info.mount; + const path_modification = storage_path.replace('/noobaa_storage/', '') .replace('/', '') .replace('.', ''); return fs_utils.folder_delete(path_modification); })); } else if (self.params.duplicate || self.params.notfound) { - let reason = self.params.duplicate ? 'duplicate' : 'notfound'; - let target_noobaa_storage = reason + '_noobaa_storage_' + Date.now(); + const reason = self.params.duplicate ? 
'duplicate' : 'notfound'; + const target_noobaa_storage = reason + '_noobaa_storage_' + Date.now(); dbg.log0(`got ${reason} flag - renaming noobaa_storage to ${target_noobaa_storage}`); return P.all(_.map(self.params.all_storage_paths, storage_path_info => { // move noobaa_storage in all drives to an alternate location - let storage_path = storage_path_info.mount; - let target_path = storage_path.replace('noobaa_storage', target_noobaa_storage); + const storage_path = storage_path_info.mount; + const target_path = storage_path.replace('noobaa_storage', target_noobaa_storage); dbg.log0('moving', storage_path, 'to', target_path); return fs.promises.rename(storage_path, target_path); })) @@ -179,7 +179,7 @@ class AgentCLI { } detect_new_drives() { - var self = this; + const self = this; const retry = () => { setTimeout(() => self.detect_new_drives(), DETECT_NEW_DRIVES_INTERVAL); }; @@ -247,14 +247,14 @@ class AgentCLI { * */ load(added_storage_paths) { - var self = this; - let paths_to_work_on = added_storage_paths || self.params.all_storage_paths; - let internal_agent_prefix = 'noobaa-internal-agent-'; + const self = this; + const paths_to_work_on = added_storage_paths || self.params.all_storage_paths; + const internal_agent_prefix = 'noobaa-internal-agent-'; // TODO: This has to work on partitial relevant paths only // handle regular agents dbg.log0('Loading agents', paths_to_work_on); return P.all(_.map(paths_to_work_on, function(storage_path_info) { - var storage_path = storage_path_info.mount; + const storage_path = storage_path_info.mount; dbg.log0('root_path', storage_path); return P.resolve() .then(() => fs_utils.create_path(storage_path, fs_utils.PRIVATE_DIR_PERMISSIONS)) @@ -272,15 +272,15 @@ class AgentCLI { dbg.log0('nodes_names:', regular_node_names); return P.map(regular_node_names, node_name => { dbg.log0('node_name', node_name, 'storage_path', storage_path); - var node_path = path.join(storage_path, node_name); + const node_path = path.join(storage_path, node_name); return self.start(node_name, node_path); }); }); })) .then(storage_path_nodes => { - var nodes_scale = 0; - var number_of_new_paths = 0; - var existing_nodes_count = 0; + const nodes_scale = 0; + let number_of_new_paths = 0; + let existing_nodes_count = 0; _.each(storage_path_nodes, function(nodes) { // assumes same amount of nodes per each HD. we will take the last one. if (nodes.length) { @@ -294,7 +294,7 @@ class AgentCLI { // which asks to create at least that number of total nodes. // Please note that the scale is per storage path. if the scale is 2 and there are two HD // we will have 4 nodes. 
In addition, we will always scale to at least 1 node - var nodes_to_add = 0; + let nodes_to_add = 0; dbg.log0('AGENTS SCALE TO', nodes_scale); dbg.log0('AGENTS EXISTING', existing_nodes_count); dbg.log0('AGENTS NEW PATHS', number_of_new_paths); @@ -318,10 +318,10 @@ class AgentCLI { } hide_storage_folder(current_storage_path) { - var self = this; + const self = this; dbg.log0('os:', os.type()); if (os.type().indexOf('Windows') >= 0) { - var current_path = current_storage_path; + let current_path = current_storage_path; current_path = current_path.substring(0, current_path.length - 1); current_path = current_path.replace('./', ''); //hiding storage folder @@ -357,13 +357,13 @@ class AgentCLI { } create_node_helper(current_node_path_info) { - var self = this; + const self = this; return P.fcall(function() { dbg.log0('create_node_helper called with self.params', self.params); - var current_node_path = current_node_path_info.mount; - var node_name = self.params.hostname; + const current_node_path = current_node_path_info.mount; + let node_name = self.params.hostname; const noobaa_storage_dir_name = self.params.test_hostname ? 'noobaa_storage_' + self.params.test_hostname : 'noobaa_storage'; - var path_modification = current_node_path.replace('/' + noobaa_storage_dir_name + '/', '').replace(/\//g, '') + let path_modification = current_node_path.replace('/' + noobaa_storage_dir_name + '/', '').replace(/\//g, '') .replace('.', ''); //windows path_modification = path_modification.replace('\\' + noobaa_storage_dir_name + '\\', ''); @@ -374,21 +374,21 @@ class AgentCLI { node_name = node_name + '-' + path_modification.replace('/', ''); } node_name += '-' + self.params.host_id.split('-')[0]; - var node_path = path.join(current_node_path, node_name); - var token_path = path.join(node_path, 'token'); + const node_path = path.join(current_node_path, node_name); + const token_path = path.join(node_path, 'token'); dbg.log0('create new node for node name', node_name, ' path:', node_path, ' token path:', token_path); return fs_utils.file_must_not_exist(token_path) .then(function() { if (self.params.create_node_token) return; // authenticate and create a token for new nodes - var basic_auth_params = _.pick(self.params, 'system', 'role'); + const basic_auth_params = _.pick(self.params, 'system', 'role'); if (_.isEmpty(basic_auth_params)) { throw new Error("No credentials"); } else { - var secret_key = self.params.secret_key; - var auth_params_str = JSON.stringify(basic_auth_params); - var signature = self.s3.sign(secret_key, auth_params_str); - var auth_params = { + const secret_key = self.params.secret_key; + const auth_params_str = JSON.stringify(basic_auth_params); + const signature = self.s3.sign(secret_key, auth_params_str); + const auth_params = { access_key: self.params.access_key, string_to_sign: auth_params_str, signature: signature, @@ -439,8 +439,8 @@ class AgentCLI { * */ create(number_of_nodes, paths_to_work_on) { - var self = this; - let storage_paths_to_add = paths_to_work_on || self.params.all_storage_paths; + const self = this; + const storage_paths_to_add = paths_to_work_on || self.params.all_storage_paths; //create root path last. First, create all other. // for internal_agents only use root path return P.all(_.map(_.drop(storage_paths_to_add, 1), function(current_storage_path) { @@ -483,12 +483,12 @@ class AgentCLI { * */ create_some(n, paths_to_work_on) { - var self = this; + const self = this; //special case, new HD introduction to the system. 
adding only these new HD nodes if (n === 0) { return self.create(0, paths_to_work_on); } else { - var sem = new Semaphore(5); + const sem = new Semaphore(5); return P.all(_.times(n, function() { return sem.surround(function() { return self.create(n, paths_to_work_on); @@ -505,9 +505,9 @@ class AgentCLI { * */ async start(node_name, node_path) { - var self = this; + const self = this; dbg.log0('agent started ', node_path, node_name); - var agent = self.agents[node_name]; + let agent = self.agents[node_name]; if (!self.params.address) { self.params.address = addr_utils.format_base_address(); @@ -515,12 +515,12 @@ class AgentCLI { if (!agent) { // token wrapper is used by agent to read\write token - let token_path = path.join(node_path, 'token'); - let token_wrapper = { + const token_path = path.join(node_path, 'token'); + const token_wrapper = { read: () => fs.promises.readFile(token_path), write: token => fs_utils.replace_file(token_path, token), }; - let create_node_token_wrapper = { + const create_node_token_wrapper = { read: () => this.agent_conf.read() .then(agent_conf => agent_conf.create_node_token), write: new_token => this.agent_conf.update({ @@ -572,8 +572,8 @@ class AgentCLI { * */ stop(node_name) { - var self = this; - var agent = self.agents[node_name]; + const self = this; + const agent = self.agents[node_name]; if (!agent) { dbg.log0('agent not found', node_name); return; @@ -605,8 +605,8 @@ class AgentCLI { * */ show(func_name) { - var func = this[func_name]; - var helper = func && func.helper; + const func = this[func_name]; + const helper = func && func.helper; // in the common case the helper is a function - so call it if (typeof(helper) === 'function') { return helper.call(this); @@ -641,7 +641,7 @@ class AgentCLI { function agent_cli_repl() { // start a Read-Eval-Print-Loop - var repl_srv = repl.start({ + const repl_srv = repl.start({ prompt: 'agent-cli > ', useGlobal: false }); @@ -724,15 +724,15 @@ function main() { if (argv.scale) { return P.map(_.range(0, argv.scale), i => { - let params = _.clone(argv); + const params = _.clone(argv); params.test_hostname = `${os.hostname()}-${i}`; - let test_agent_cli = new AgentCLI(params); + const test_agent_cli = new AgentCLI(params); hosts[params.test_hostname] = { params, host: test_agent_cli, running: true }; return test_agent_cli.init(); }) .then(() => argv.repl && agent_cli_repl()); } else { - var cli = new AgentCLI(argv); + const cli = new AgentCLI(argv); return cli.init() .then(() => argv.repl && agent_cli_repl()); } diff --git a/src/agent/agent_wrap.js b/src/agent/agent_wrap.js index 9cbadcc9ec..0336595484 100644 --- a/src/agent/agent_wrap.js +++ b/src/agent/agent_wrap.js @@ -84,7 +84,7 @@ async function run_agent_cli(agent_args = []) { if (!promise_handled) { dbg.error(`agent_cli exited for unknown reason. code=${code}, signal=${signal}`); promise_handled = true; - let e = new Error(`agent_cli exited for unknown reason. code=${code}, signal=${signal}`); + const e = new Error(`agent_cli exited for unknown reason. 
code=${code}, signal=${signal}`); e.code = code; reject(e); } diff --git a/src/agent/block_store_services/block_store_azure.js b/src/agent/block_store_services/block_store_azure.js index ce89501358..b9546547a0 100644 --- a/src/agent/block_store_services/block_store_azure.js +++ b/src/agent/block_store_services/block_store_azure.js @@ -175,11 +175,11 @@ class BlockStoreAzure extends BlockStoreBase { _delete_blocks(block_ids) { // Todo: Assuming that all requested blocks were deleted, which a bit naive - let deleted_storage = { + const deleted_storage = { size: 0, count: 0 }; - let failed_to_delete_block_ids = []; + const failed_to_delete_block_ids = []; dbg.log1('block_store_azure._delete_blocks block_ids: ', block_ids); return P.map_with_concurrency(10, block_ids, async block_id => { diff --git a/src/agent/block_store_services/block_store_base.js b/src/agent/block_store_services/block_store_base.js index f212f6260f..a1caa48e42 100644 --- a/src/agent/block_store_services/block_store_base.js +++ b/src/agent/block_store_services/block_store_base.js @@ -28,7 +28,7 @@ function _new_monitring_stats() { } function get_block_internal_dir(block_id) { - let internal_dir = hex_str_regex.test(block_id) ? + const internal_dir = hex_str_regex.test(block_id) ? block_id.substring(block_id.length - 3) + '.blocks' : 'other.blocks'; return internal_dir; @@ -262,7 +262,7 @@ class BlockStoreBase { dbg.log0('preallocate_block', block_md.id, block_md.size, block_md.digest_b64, 'node', this.node_name); this._check_write_space(block_md.size); const block_size = block_md.size; - let usage = { + const usage = { size: block_size, count: 1 }; diff --git a/src/agent/block_store_services/block_store_fs.js b/src/agent/block_store_services/block_store_fs.js index f2e909606f..3aa90b4b74 100644 --- a/src/agent/block_store_services/block_store_fs.js +++ b/src/agent/block_store_services/block_store_fs.js @@ -34,7 +34,7 @@ class BlockStoreFs extends BlockStoreBase { const num_dirs = 16 ** num_digits; const dir_list = []; for (let i = 0; i < num_dirs; ++i) { - let dir_str = string_utils.left_pad_zeros(i.toString(16), num_digits) + '.blocks'; + const dir_str = string_utils.left_pad_zeros(i.toString(16), num_digits) + '.blocks'; dir_list.push(path.join(this.blocks_path_root, dir_str)); } dir_list.push(path.join(this.blocks_path_root, 'other.blocks')); @@ -147,8 +147,8 @@ class BlockStoreFs extends BlockStoreBase { md_overwrite_stat.size : 0); overwrite_count = 1; } - let size = (block_md.is_preallocated ? 0 : data.length) + block_md_data.length - overwrite_size; - let count = (block_md.is_preallocated ? 0 : 1) - overwrite_count; + const size = (block_md.is_preallocated ? 0 : data.length) + block_md_data.length - overwrite_size; + const count = (block_md.is_preallocated ? 0 : 1) - overwrite_count; if (size || count) this._update_usage({ size, count }); }); } @@ -159,7 +159,7 @@ class BlockStoreFs extends BlockStoreBase { _delete_blocks(block_ids) { - let failed_to_delete_block_ids = []; + const failed_to_delete_block_ids = []; return P.map_with_concurrency(10, block_ids, block_id => this._delete_block(block_id) .catch(err => { @@ -200,7 +200,7 @@ class BlockStoreFs extends BlockStoreBase { ]) .then(() => { if (this._usage && del_stat) { - let usage = { + const usage = { size: -(del_stat.size + ((md_del_stat && md_del_stat.size) ? 
md_del_stat.size : 0)), count: -1 }; @@ -219,7 +219,7 @@ class BlockStoreFs extends BlockStoreBase { dbg.log0('counted disk usage', usage); this._usage = usage; // object with properties size and count // update usage file - let usage_data = JSON.stringify(this._usage); + const usage_data = JSON.stringify(this._usage); return fs.promises.writeFile(this.usage_path, usage_data) .then(() => usage); }); @@ -251,17 +251,17 @@ class BlockStoreFs extends BlockStoreBase { } _get_block_data_path(block_id) { - let block_dir = get_block_internal_dir(block_id); + const block_dir = get_block_internal_dir(block_id); return path.join(this.blocks_path_root, block_dir, block_id + '.data'); } _get_block_meta_path(block_id) { - let block_dir = get_block_internal_dir(block_id); + const block_dir = get_block_internal_dir(block_id); return path.join(this.blocks_path_root, block_dir, block_id + '.meta'); } _get_block_other_path(file) { - let block_dir = get_block_internal_dir('other'); + const block_dir = get_block_internal_dir('other'); return path.join(this.blocks_path_root, block_dir, file); } diff --git a/src/agent/block_store_services/block_store_google.js b/src/agent/block_store_services/block_store_google.js index 9bdef30d30..c7a9df51c6 100644 --- a/src/agent/block_store_services/block_store_google.js +++ b/src/agent/block_store_services/block_store_google.js @@ -151,10 +151,9 @@ class BlockStoreGoogle extends BlockStoreBase { } async _write_block(block_md, data, options) { - let encoded_md; const key = this._block_key(block_md.id); const target_file = this.bucket.file(key); - encoded_md = this._encode_block_md(block_md); + const encoded_md = this._encode_block_md(block_md); const write_stream = target_file.createWriteStream({ metadata: { metadata: { @@ -225,11 +224,11 @@ class BlockStoreGoogle extends BlockStoreBase { async _delete_blocks(block_ids) { // Todo: Assuming that all requested blocks were deleted, which a bit naive - let deleted_storage = { + const deleted_storage = { size: 0, count: 0 }; - let failed_to_delete_block_ids = []; + const failed_to_delete_block_ids = []; // limit concurrency to 10 await P.map_with_concurrency(10, block_ids, async block_id => { const block_key = this._block_key(block_id); diff --git a/src/agent/block_store_services/block_store_mongo.js b/src/agent/block_store_services/block_store_mongo.js index ecf68d237c..40fbf42221 100644 --- a/src/agent/block_store_services/block_store_mongo.js +++ b/src/agent/block_store_services/block_store_mongo.js @@ -186,7 +186,7 @@ class BlockStoreMongo extends BlockStoreBase { } _delete_blocks(block_ids) { - let failed_to_delete_block_ids = []; + const failed_to_delete_block_ids = []; const block_names = _.map(block_ids, block_id => this._block_key(block_id)); return sem_delete.surround(() => P.map_with_concurrency(10, block_names, block_name => diff --git a/src/agent/block_store_services/block_store_s3.js b/src/agent/block_store_services/block_store_s3.js index 94adb56406..164bfcac8b 100644 --- a/src/agent/block_store_services/block_store_s3.js +++ b/src/agent/block_store_services/block_store_s3.js @@ -362,11 +362,11 @@ class BlockStoreS3 extends BlockStoreBase { } async _delete_blocks(block_ids) { - let deleted_storage = { + const deleted_storage = { size: 0, count: 0 }; - let failed_block_ids = []; + const failed_block_ids = []; // Todo: Assuming that all requested blocks were deleted, which a bit naive try { if (this.cloud_info.aws_sts_arn) { diff --git a/src/core/nsfs.js b/src/core/nsfs.js index 781cc531b7..4783ad4d1f 100644 --- 
a/src/core/nsfs.js +++ b/src/core/nsfs.js @@ -85,7 +85,7 @@ async function main(argv = minimist(process.argv.slice(2))) { if (!fs_root) return print_usage(); const versioning = argv.versioning || 'DISABLED'; - let fs_config = { + const fs_config = { uid: Number(argv.uid) || process.getuid(), gid: Number(argv.gid) || process.getgid(), backend, diff --git a/src/deploy/NVA_build/mongo_init_rs.js b/src/deploy/NVA_build/mongo_init_rs.js index d7bf284508..11978c56d0 100644 --- a/src/deploy/NVA_build/mongo_init_rs.js +++ b/src/deploy/NVA_build/mongo_init_rs.js @@ -3,8 +3,8 @@ 'use strict'; -var host; -var user; +let host; +let user; // authenticate db.getSiblingDB("$external").auth({ @@ -12,7 +12,7 @@ db.getSiblingDB("$external").auth({ user: user }); -var rs_config = { +const rs_config = { _id: 'shard1', members: [{ _id: 0, diff --git a/src/deploy/ec2_wrapper.js b/src/deploy/ec2_wrapper.js index fcf8f37399..ec41360357 100644 --- a/src/deploy/ec2_wrapper.js +++ b/src/deploy/ec2_wrapper.js @@ -1,15 +1,15 @@ /* Copyright (C) 2016 NooBaa */ "use strict"; -var _ = require('lodash'); -var P = require('../util/promise'); -var fs = require('fs'); -var path = require('path'); -var util = require('util'); -var dotenv = require('../util/dotenv'); -var argv = require('minimist')(process.argv); -var AWS = require('aws-sdk'); -var moment = require('moment'); +const _ = require('lodash'); +const P = require('../util/promise'); +const fs = require('fs'); +const path = require('path'); +const util = require('util'); +const dotenv = require('../util/dotenv'); +const argv = require('minimist')(process.argv); +const AWS = require('aws-sdk'); +const moment = require('moment'); /** * @@ -62,12 +62,12 @@ if (!process.env.AWS_ACCESS_KEY_ID) { // the heroku app name -var app_name = ''; +let app_name = ''; -var _ec2 = new AWS.EC2(); -var _ec2_per_region = {}; +const _ec2 = new AWS.EC2(); +const _ec2_per_region = {}; -var _cached_ami_image_id; +let _cached_ami_image_id; load_aws_config_env(); @@ -86,13 +86,13 @@ load_aws_config_env(); * */ function describe_instances(params, filter, verbose) { - var regions = []; + const regions = []; return foreach_region(function(region) { regions.push(region); return ec2_region_call(region.RegionName, 'describeInstances', params) .then(function(res) { // return a flat array of instances from res.Reservations[].Instances[] - var instances = _.flatten(_.map(res.Reservations, 'Instances')); + const instances = _.flatten(_.map(res.Reservations, 'Instances')); // prepare instance extra fields and filter out irrelevant instances return _.filter(instances, function(instance) { instance.region = region; @@ -134,7 +134,7 @@ function describe_instances(params, filter, verbose) { }) .then(function(res) { // flatten again for all regions, remove regions without results - var instances = _.flatten(_.filter(res, function(r) { + const instances = _.flatten(_.filter(res, function(r) { return r !== false; })); // also put the regions list as a "secret" property of the array @@ -260,7 +260,7 @@ function create_instance_from_ami(ami_name, region, instance_type, name) { }); }) .then(function(res) { - var id = res.Instances[0].InstanceId; + const id = res.Instances[0].InstanceId; console.log('Got instanceID', id); if (name) { add_instance_name(id, name, region); @@ -295,7 +295,7 @@ function get_ip_address(instid) { .then(function(res) { //On pending instance state, still no public IP. Wait. 
if (res.State.Name === 'pending') { - var params = { + const params = { InstanceIds: [instid], }; @@ -331,8 +331,8 @@ function get_ip_address(instid) { function verify_demo_system(ip) { load_demo_config_env(); //switch to Demo system - var rest_endpoint = 'http://' + ip + ':80/'; - var s3bucket = new AWS.S3({ + const rest_endpoint = 'http://' + ip + ':80/'; + const s3bucket = new AWS.S3({ endpoint: rest_endpoint, s3ForcePathStyle: true, sslEnabled: false, @@ -355,8 +355,8 @@ function verify_demo_system(ip) { function put_object(ip, source, bucket, key, timeout, throw_on_error) { load_demo_config_env(); //switch to Demo system - var rest_endpoint = 'http://' + ip + ':80'; - var s3bucket = new AWS.S3({ + const rest_endpoint = 'http://' + ip + ':80'; + const s3bucket = new AWS.S3({ endpoint: rest_endpoint, s3ForcePathStyle: true, sslEnabled: false, @@ -367,22 +367,22 @@ function put_object(ip, source, bucket, key, timeout, throw_on_error) { bucket = bucket || 'first.bucket'; key = key || 'ec2_wrapper_test_upgrade.dat'; - var params = { + const params = { Bucket: bucket, Key: key, Body: fs.createReadStream(source), }; console.log('about to upload object', params); - var start_ts = Date.now(); + let start_ts = Date.now(); return P.ninvoke(s3bucket, 'upload', params) .then(function(res) { console.log('Uploaded object took', (Date.now() - start_ts) / 1000, 'seconds, result', res); load_aws_config_env(); //back to EC2/S3 }, function(err) { - var wait_limit_in_sec = timeout || 1200; - var start_moment = moment(); - var wait_for_agents = (err.statusCode === 500 || err.statusCode === 403); + const wait_limit_in_sec = timeout || 1200; + const start_moment = moment(); + let wait_for_agents = (err.statusCode === 500 || err.statusCode === 403); console.log('failed to upload object in loop', err.statusCode, wait_for_agents); return P.pwhile( function() { @@ -403,7 +403,7 @@ function put_object(ip, source, bucket, key, timeout, throw_on_error) { }, function(err2) { console.log('failed to upload. Will wait 10 seconds and retry. err', err2.statusCode); - var curr_time = moment(); + const curr_time = moment(); if (curr_time.subtract(wait_limit_in_sec, 'second') > start_moment) { console.error('failed to upload. 
cannot wait any more', err2.statusCode); load_aws_config_env(); //back to EC2/S3 @@ -423,21 +423,21 @@ function put_object(ip, source, bucket, key, timeout, throw_on_error) { function get_object(ip, obj_path) { load_demo_config_env(); //switch to Demo system - var rest_endpoint = 'http://' + ip + ':80/'; - var s3bucket = new AWS.S3({ + const rest_endpoint = 'http://' + ip + ':80/'; + const s3bucket = new AWS.S3({ endpoint: rest_endpoint, s3ForcePathStyle: true, sslEnabled: false, }); - var params = { + const params = { Bucket: 'first.bucket', Key: 'ec2_wrapper_test_upgrade.dat', }; - var file = obj_path && fs.createWriteStream(obj_path); + const file = obj_path && fs.createWriteStream(obj_path); - var start_ts = Date.now(); + const start_ts = Date.now(); console.log('about to download object'); return P.fcall(function() { if (obj_path) { @@ -479,10 +479,10 @@ function scale_agent_instances(count, allow_terminate, is_docker_host, number_of }, { match: app_name, }).then(function(instances) { - var instances_per_region = _.groupBy(instances, 'region_name'); - var region_names = _.map(instances.regions, 'RegionName'); - var target_region_count = 0; - var first_region_extra_count = 0; + const instances_per_region = _.groupBy(instances, 'region_name'); + let region_names = _.map(instances.regions, 'RegionName'); + let target_region_count = 0; + let first_region_extra_count = 0; if (filter_region !== '') { console.log('Filter and use only region:', filter_region); @@ -514,10 +514,10 @@ function scale_agent_instances(count, allow_terminate, is_docker_host, number_of console.log('Scale:', target_region_count, 'per region'); console.log('Scale:', first_region_extra_count, 'extra in first region'); - var new_count = 0; + let new_count = 0; return P.all(_.map(region_names, function(region_name) { - var region_instances = instances_per_region[region_name] || []; - var region_count = 0; + const region_instances = instances_per_region[region_name] || []; + let region_count = 0; if (new_count < count) { if (first_region_extra_count > 0 && region_name === region_names[0]) { region_count = target_region_count + first_region_extra_count; @@ -538,18 +538,18 @@ function scale_agent_instances(count, allow_terminate, is_docker_host, number_of * */ function add_agent_region_instances(region_name, count, is_docker_host, number_of_dockers, is_win, agent_conf) { - var instance_type = 'c3.large'; + let instance_type = 'c3.large'; // the run script to send to started instances - var run_script = fs.readFileSync(path.join(__dirname, 'init_agent.sh'), 'UTF8'); + let run_script = fs.readFileSync(path.join(__dirname, 'init_agent.sh'), 'UTF8'); - var test_instances_counter; + let test_instances_counter; if (is_docker_host) { instance_type = 'm3.2xlarge'; run_script = fs.readFileSync(path.join(__dirname, 'docker_setup.sh'), 'utf8'); //replace 'test' with the correct env name test_instances_counter = (run_script.match(/test/g) || []).length; - var dockers_instances_counter = (run_script.match(/200/g) || []).length; + const dockers_instances_counter = (run_script.match(/200/g) || []).length; if (test_instances_counter !== 1 || dockers_instances_counter !== 1) { throw new Error('docker_setup.sh expected to contain default env "test" and default number of dockers - 200'); @@ -663,7 +663,7 @@ function ec2_call(func_name, params) { function ec2_region_call(region_name, func_name, params) { - var ec2 = _ec2_per_region[region_name] || new AWS.EC2({ + const ec2 = _ec2_per_region[region_name] || new AWS.EC2({ region: region_name }); 
_ec2_per_region[region_name] = ec2; @@ -676,7 +676,7 @@ function set_app_name(appname) { } function ec2_wait_for(region_name, state_name, params) { - var ec2 = _ec2_per_region[region_name] || new AWS.EC2({ + const ec2 = _ec2_per_region[region_name] || new AWS.EC2({ region: region_name }); _ec2_per_region[region_name] = ec2; @@ -732,9 +732,9 @@ function scale_region(region_name, count, instances, allow_terminate, is_docker_ } console.log('ScaleRegion:', region_name, 'has', instances.length, ' --- removing', instances.length - count); - var death_row = _.slice(instances, 0, instances.length - count); + const death_row = _.slice(instances, 0, instances.length - count); console.log('death:', death_row.length); - var ids = _.map(death_row, 'InstanceId'); + const ids = _.map(death_row, 'InstanceId'); return terminate_instances(region_name, ids); } @@ -751,7 +751,7 @@ function scale_region(region_name, count, instances, allow_terminate, is_docker_ * */ function create_security_group(region_name) { - var ssh_and_http_v2 = 'ssh_and_http_v2'; + const ssh_and_http_v2 = 'ssh_and_http_v2'; // first find if the group exists return ec2_region_call(region_name, 'describeSecurityGroups', { diff --git a/src/deploy/kubernetes_functions.js b/src/deploy/kubernetes_functions.js index ecd8ccf059..663285d96a 100644 --- a/src/deploy/kubernetes_functions.js +++ b/src/deploy/kubernetes_functions.js @@ -176,7 +176,7 @@ class KubernetesFunctions { }) { const server_details = {}; try { - let resources_file_path = path.join(this.output_dir, `${this.namespace}.server_deployment.${Date.now()}.json`); + const resources_file_path = path.join(this.output_dir, `${this.namespace}.server_deployment.${Date.now()}.json`); // modify resources and write to temp yaml const resources = await this.read_resources(server_yaml); const statefulset = resources.find(res => res.kind === 'StatefulSet'); diff --git a/src/endpoint/blob/blob_rest.js b/src/endpoint/blob/blob_rest.js index df92b6296e..e251cd9243 100644 --- a/src/endpoint/blob/blob_rest.js +++ b/src/endpoint/blob/blob_rest.js @@ -122,7 +122,7 @@ function authenticate_request(req) { const system_store = require('../../server/system_services/system_store').get_instance(); // eslint-disable-line global-require try { // TODO: fix authentication. currently autherizes everything. 
- let system = system_store.data.systems[0]; + const system = system_store.data.systems[0]; const auth_token = auth_server.make_auth_token({ system_id: system._id, account_id: system.owner._id, diff --git a/src/endpoint/blob/blob_utils.js b/src/endpoint/blob/blob_utils.js index 1360752518..21d9b508d6 100644 --- a/src/endpoint/blob/blob_utils.js +++ b/src/endpoint/blob/blob_utils.js @@ -25,10 +25,10 @@ function set_response_object_md(res, object_md) { const X_MS_META = 'x-ms-meta-'; function get_request_xattr(req) { - let xattr = {}; + const xattr = {}; _.each(req.headers, (val, hdr) => { if (!hdr.startsWith(X_MS_META)) return; - let key = hdr.slice(X_MS_META.length); + const key = hdr.slice(X_MS_META.length); if (!key) return; xattr[key] = val; }); @@ -101,9 +101,9 @@ async function list_objects(params, account_name, container, sasToken) { blobs = parsed.EnumerationResults.Blobs[0].Blob; dirs = parsed.EnumerationResults.Blobs[0].BlobPrefix; next_marker = parsed.EnumerationResults.NextMarker[0]; - let parse_blobs = key => { + const parse_blobs = key => { const props = key.Properties[0]; - let obj = Object.keys(props).reduce((acc, p) => { + const obj = Object.keys(props).reduce((acc, p) => { acc[(_.lowerFirst(_.camelCase(p)))] = props[p][0]; return acc; }, { name: key.Name[0] }); diff --git a/src/endpoint/blob/ops/blob_put_blob.js b/src/endpoint/blob/ops/blob_put_blob.js index cfae5a71e7..3124213d07 100644 --- a/src/endpoint/blob/ops/blob_put_blob.js +++ b/src/endpoint/blob/ops/blob_put_blob.js @@ -11,7 +11,7 @@ const mime = require('mime'); * https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob */ async function put_blob(req, res) { - let copy_source = blob_utils.parse_copy_source(req); + const copy_source = blob_utils.parse_copy_source(req); const { etag } = await req.object_sdk.upload_object({ bucket: req.params.bucket, diff --git a/src/endpoint/endpoint.js b/src/endpoint/endpoint.js index c0715431fe..e117317eed 100755 --- a/src/endpoint/endpoint.js +++ b/src/endpoint/endpoint.js @@ -43,7 +43,7 @@ const endpoint_stats_collector = require('../sdk/endpoint_stats_collector'); const { NamespaceMonitor } = require('../server/bg_services/namespace_monitor'); if (process.env.NOOBAA_LOG_LEVEL) { - let dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); + const dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); dbg_conf.endpoint.map(module => dbg.set_module_level(dbg_conf.level, module)); } diff --git a/src/endpoint/s3/ops/s3_put_object_retention.js b/src/endpoint/s3/ops/s3_put_object_retention.js index 89748d83a5..623a507987 100644 --- a/src/endpoint/s3/ops/s3_put_object_retention.js +++ b/src/endpoint/s3/ops/s3_put_object_retention.js @@ -19,7 +19,7 @@ async function put_object_retention(req) { if (!mode || !retain_until_date) throw new S3Error(S3Error.MalformedXML); retain_until_date = new Date(req.body.Retention.RetainUntilDate[0]); - let bypass_governance = req.headers['x-amz-bypass-governance-retention'] && req.headers['x-amz-bypass-governance-retention'].toUpperCase() === 'TRUE'; + const bypass_governance = req.headers['x-amz-bypass-governance-retention'] && req.headers['x-amz-bypass-governance-retention'].toUpperCase() === 'TRUE'; if (s3_utils._is_valid_retention(mode, retain_until_date)) { await req.object_sdk.put_object_retention({ diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js index b8fa4763ff..65ea8c63f0 100755 --- a/src/endpoint/s3/s3_rest.js +++ b/src/endpoint/s3/s3_rest.js @@ -357,7 +357,7 @@ function 
parse_op_name(req) { } function handle_error(req, res, err) { - var s3err = + let s3err = ((err instanceof S3Error) && err) || new S3Error(S3Error.RPC_ERRORS_TO_S3[err.rpc_code] || S3Error.InternalError); @@ -407,7 +407,7 @@ function handle_error(req, res, err) { } async function _handle_html_response(req, res, err) { - var s3err = + let s3err = ((err instanceof S3Error) && err) || new S3Error(S3Error.RPC_ERRORS_TO_S3[err.rpc_code] || S3Error.InternalError); diff --git a/src/endpoint/s3/s3_utils.js b/src/endpoint/s3/s3_utils.js index 9dbc876ca8..eb6e1f4b50 100644 --- a/src/endpoint/s3/s3_utils.js +++ b/src/endpoint/s3/s3_utils.js @@ -120,7 +120,7 @@ function decode_chunked_upload(source_stream) { } function format_s3_xml_date(input) { - let date = input ? new Date(input) : new Date(); + const date = input ? new Date(input) : new Date(); date.setMilliseconds(0); return date.toISOString(); } @@ -128,10 +128,10 @@ function format_s3_xml_date(input) { const X_AMZ_META = 'x-amz-meta-'; function get_request_xattr(req) { - let xattr = {}; + const xattr = {}; _.each(req.headers, (val, hdr) => { if (!hdr.startsWith(X_AMZ_META)) return; - let key = hdr.slice(X_AMZ_META.length); + const key = hdr.slice(X_AMZ_META.length); if (!key) return; xattr[key] = val; }); @@ -475,18 +475,18 @@ function parse_body_object_lock_conf_xml(req) { }; if (retention.Days) { - let days = parseInt(retention.Days[0], 10); + const days = parseInt(retention.Days[0], 10); if (days <= 0) { - let err = new S3Error(S3Error.InvalidArgument); + const err = new S3Error(S3Error.InvalidArgument); err.message = 'Default retention period must be a positive integer value'; throw err; } conf.rule.default_retention.days = days; } if (retention.Years) { - let years = parseInt(retention.Years[0], 10); + const years = parseInt(retention.Years[0], 10); if (years <= 0) { - let err = new S3Error(S3Error.InvalidArgument); + const err = new S3Error(S3Error.InvalidArgument); err.message = 'Default retention period must be a positive integer value'; throw err; } diff --git a/src/endpoint/sts/sts_rest.js b/src/endpoint/sts/sts_rest.js index f6b1177a68..9e6fbd7091 100644 --- a/src/endpoint/sts/sts_rest.js +++ b/src/endpoint/sts/sts_rest.js @@ -175,7 +175,7 @@ function parse_op_name(req, action) { } function handle_error(req, res, err) { - let stserr = + const stserr = ((err instanceof StsError) && err) || new StsError(RPC_ERRORS_TO_STS[err.rpc_code] || StsError.InternalFailure); diff --git a/src/hosted_agents/hosted_agents.js b/src/hosted_agents/hosted_agents.js index 5462195891..a1da431cfb 100644 --- a/src/hosted_agents/hosted_agents.js +++ b/src/hosted_agents/hosted_agents.js @@ -138,7 +138,7 @@ class HostedAgents { const existing_token = info.agent_info ? info.agent_info.node_token : null; const pool_agent_path = pool.resource_type === 'INTERNAL' ? 'mongo_pool_info' : 'cloud_pool_info'; - let update = { + const update = { pools: [{ _id: pool._id, [`${pool_agent_path}.agent_info`]: { @@ -246,8 +246,8 @@ class HostedAgents { dbg.warn(`${node_name} is not started. 
ignoring stop`); return; } - let agent = this._started_agents[node_name].agent; - let agent_pool = this._started_agents[node_name].pool; + const agent = this._started_agents[node_name].agent; + const agent_pool = this._started_agents[node_name].pool; if (agent) { agent.stop(); try { diff --git a/src/rpc/ice.js b/src/rpc/ice.js index 2aae291709..0fa0e7678b 100644 --- a/src/rpc/ice.js +++ b/src/rpc/ice.js @@ -111,7 +111,7 @@ util.inherits(Ice, events.EventEmitter); * */ function Ice(connid, n2n_config, signal_target) { - var self = this; + const self = this; events.EventEmitter.call(self); self.setMaxListeners(100); @@ -179,9 +179,9 @@ function Ice(connid, n2n_config, signal_target) { * using crypto random to avoid predictability */ function random_crypto_string(len, char_pool) { - var str = ''; - var bytes = crypto.randomBytes(len); - for (var i = 0; i < len; ++i) { + let str = ''; + const bytes = crypto.randomBytes(len); + for (let i = 0; i < len; ++i) { str += char_pool[bytes[i] % char_pool.length]; } return str; @@ -194,7 +194,7 @@ function random_crypto_string(len, char_pool) { * */ Ice.prototype.connect = function() { - var self = this; + const self = this; // mark the connect side as controlling, which means I will be the one // choosing the best connection to use. @@ -240,7 +240,7 @@ Ice.prototype.connect = function() { * in the background it starts to try connecting. */ Ice.prototype.accept = function(remote_info) { - var self = this; + const self = this; return P.fcall(function() { @@ -277,7 +277,7 @@ Ice.prototype.accept = function(remote_info) { * _add_local_candidates */ Ice.prototype._add_local_candidates = function() { - var self = this; + const self = this; return Promise.all([ self._add_udp_candidates(), self._add_tcp_active_candidates(), @@ -297,7 +297,7 @@ Ice.prototype._add_local_candidates = function() { * _add_udp_candidates */ Ice.prototype._add_udp_candidates = function() { - var self = this; + const self = this; if (!self.config.udp_port) return; if (!process.env.ENABLE_N2N_UDP) return; @@ -334,7 +334,7 @@ Ice.prototype._add_udp_candidates = function() { * _add_tcp_active_candidates */ Ice.prototype._add_tcp_active_candidates = function() { - var self = this; + const self = this; if (!self.config.tcp_active) return; _.each(self.networks, function(n, ifcname) { self._add_local_candidate({ @@ -358,16 +358,16 @@ Ice.prototype._add_tcp_active_candidates = function() { * and other ICE isntances using the same config will share the server. 
*/ Ice.prototype._add_tcp_permanent_passive_candidates = function() { - var self = this; + const self = this; if (!self.config.tcp_permanent_passive) return; - var conf = self.config.tcp_permanent_passive; + const conf = self.config.tcp_permanent_passive; return P.fcall(function() { // register my credentials in ice_map if (!conf.ice_map) { conf.ice_map = {}; } - var my_ice_key = self.local_credentials.ufrag + self.local_credentials.pwd; + const my_ice_key = self.local_credentials.ufrag + self.local_credentials.pwd; conf.ice_map[my_ice_key] = self; self.on('close', remove_my_from_ice_map); self.on('connect', remove_my_from_ice_map); @@ -379,8 +379,8 @@ Ice.prototype._add_tcp_permanent_passive_candidates = function() { // setup ice_lookup to find the ice instance by stun credentials if (!conf.ice_lookup) { conf.ice_lookup = function(buffer, info) { - var attr_map = stun.get_attrs_map(buffer); - var ice_key = attr_map.username.split(':', 1)[0] + attr_map.password; + const attr_map = stun.get_attrs_map(buffer); + const ice_key = attr_map.username.split(':', 1)[0] + attr_map.password; return conf.ice_map[ice_key]; }; } @@ -405,7 +405,7 @@ Ice.prototype._add_tcp_permanent_passive_candidates = function() { conf.server = null; }); } - var address = server.address(); + const address = server.address(); _.each(self.networks, function(n, ifcname) { self._add_local_candidate({ transport: 'tcp', @@ -428,13 +428,13 @@ Ice.prototype._add_tcp_permanent_passive_candidates = function() { * _add_tcp_transient_passive_candidates */ Ice.prototype._add_tcp_transient_passive_candidates = function() { - var self = this; + const self = this; if (!self.config.tcp_transient_passive) return; - var conf = self.config.tcp_transient_passive; + const conf = self.config.tcp_transient_passive; return listen_on_port_range(conf) .then(function(server) { - var address = server.address(); + const address = server.address(); // remember to close this server when ICE closes self.on('close', close_server); @@ -477,9 +477,9 @@ Ice.prototype._add_tcp_transient_passive_candidates = function() { * _add_tcp_simultaneous_open_candidates */ Ice.prototype._add_tcp_simultaneous_open_candidates = function() { - var self = this; + const self = this; if (!self.config.tcp_simultaneous_open) return; - var conf = self.config.tcp_simultaneous_open; + const conf = self.config.tcp_simultaneous_open; return P.all(_.map(self.networks, function(n, ifcname) { return allocate_port_in_range(conf) .then(function(port) { @@ -507,8 +507,8 @@ Ice.prototype._add_tcp_simultaneous_open_candidates = function() { * */ Ice.prototype._add_local_candidate = function(candidate) { - var self = this; - var local = new IceCandidate(candidate); + const self = this; + const local = new IceCandidate(candidate); if (self.local_candidates[local.key]) return; dbg.log3('ICE ADDED LOCAL CANDIDATE', local.key, self.connid); @@ -528,8 +528,8 @@ Ice.prototype._add_local_candidate = function(candidate) { * */ Ice.prototype._add_remote_candidate = function(candidate) { - var self = this; - var remote = new IceCandidate(candidate); + const self = this; + const remote = new IceCandidate(candidate); if (self.remote_candidates[remote.key]) return; dbg.log3('ICE ADDED REMOTE CANDIDATE', remote.key, self.connid); @@ -548,7 +548,7 @@ Ice.prototype._add_remote_candidate = function(candidate) { * */ Ice.prototype._check_connectivity = function(local, remote) { - var session = this._add_session_if_not_exists(local, remote); + const session = this._add_session_if_not_exists(local, remote); 
if (!session) return; if (!session.is_init()) return; session.mark_checking(); @@ -573,13 +573,13 @@ Ice.prototype._check_connectivity = function(local, remote) { * */ Ice.prototype._add_session_if_not_exists = function(local, remote) { - var self = this; + const self = this; // check if exists already by the session key // if exists and still valid then keep using it, // otherwise override it - var session_key = make_session_key(local, remote); - var existing = self.sessions_by_key[session_key]; + const session_key = make_session_key(local, remote); + const existing = self.sessions_by_key[session_key]; if (existing && !existing.is_closed()) return existing; // TODO should we support foundation and frozen candidates from the SPEC? @@ -593,7 +593,7 @@ Ice.prototype._add_session_if_not_exists = function(local, remote) { if (local.tcp_type === CAND_TCP_TYPE_SO && remote.tcp_type !== CAND_TCP_TYPE_SO) return; - var session; + let session; do { session = new IceSession(local, remote, self._make_stun_request_response(remote), self.udp); } while (self.sessions_by_tid[session.tid]); @@ -611,10 +611,10 @@ Ice.prototype._add_session_if_not_exists = function(local, remote) { * */ Ice.prototype._connect_tcp_active_passive_pair = function(session) { - var self = this; + const self = this; const MAX_ATTEMPTS = 10; - var attempts = 0; - var delay = 250; + let attempts = 0; + const delay = 250; try_ap(); function try_ap() { @@ -660,11 +660,11 @@ Ice.prototype._connect_tcp_active_passive_pair = function(session) { * */ Ice.prototype._connect_tcp_simultaneous_open_pair = function(session) { - var self = this; + const self = this; const MAX_ATTEMPTS = 200; - var attempts = 0; - var delay = 50; - var so_connect_conf = { + let attempts = 0; + let delay = 50; + const so_connect_conf = { port: session.remote.port, address: session.remote.address, localPort: session.local.port @@ -719,7 +719,7 @@ Ice.prototype._connect_tcp_simultaneous_open_pair = function(session) { * _init_udp_connection */ Ice.prototype._init_udp_connection = function(conn) { - var self = this; + const self = this; // remember to close this connection when ICE closes self.on('close', close_conn); @@ -762,7 +762,7 @@ Ice.prototype._init_tcp_connection = function(conn, session) { * see _init_tcp_connection above for an instance wrapper. 
*/ function init_tcp_connection(conn, session, ice, ice_lookup) { - var info = { + const info = { family: conn.remoteFamily, address: conn.remoteAddress, port: conn.remotePort, @@ -772,7 +772,7 @@ function init_tcp_connection(conn, session, ice, ice_lookup) { session: session, }; - var temp_queue = []; + let temp_queue = []; if (ice) { // remember to close this connection when ICE closes @@ -805,9 +805,9 @@ function init_tcp_connection(conn, session, ice, ice_lookup) { if (event !== 'message') return; dbg.log1('ICE TCP UNLEASH', temp_queue.length, 'QUEUED MESSAGES', info.key); conn.removeListener('newListener', new_listener_handler); - var mq = temp_queue; + const mq = temp_queue; temp_queue = null; - for (var i = 0; i < mq.length; ++i) { + for (let i = 0; i < mq.length; ++i) { conn.emit('message', mq[i]); } }); @@ -848,14 +848,14 @@ function init_tcp_connection(conn, session, ice, ice_lookup) { * _find_session_to_activate */ Ice.prototype._find_session_to_activate = function(force) { - var self = this; + const self = this; if (self.closed) return; // only the controlling chooses sessions to activate if (!self.controlling) return; - var best_session; - var highest_non_closed_priority = -Infinity; + let best_session; + let highest_non_closed_priority = -Infinity; // find best session and see if there's any pending sessions with higher priority _.each(self.sessions_by_tid, function(session) { @@ -896,7 +896,7 @@ Ice.prototype._activate_session = function(session) { dbg.log3('ICE SESSION ACTIVATING', session.key); - var activate_packet = stun.new_packet(stun.METHODS.REQUEST, [{ + const activate_packet = stun.new_packet(stun.METHODS.REQUEST, [{ type: stun.ATTRS.USE_CANDIDATE, value: '1' }, { @@ -931,7 +931,7 @@ Ice.prototype._activate_session = function(session) { * _activate_session_complete */ Ice.prototype._activate_session_complete = function(session) { - var self = this; + const self = this; if (self.closed) return; if (self.active_session) return; dbg.log3('ICE SESSION ACTIVE', session.key); @@ -958,11 +958,11 @@ Ice.prototype._activate_session_complete = function(session) { Ice.prototype._upgrade_to_tls = function(session) { - var self = this; + const self = this; dbg.log1('ICE UPGRADE TO TLS', session.key, session.state); - var tcp_conn = session.tcp; - var tls_conn; - var ssl_options = { honorCipherOrder: true, ...self.config.ssl_options }; + const tcp_conn = session.tcp; + let tls_conn; + const ssl_options = { honorCipherOrder: true, ...self.config.ssl_options }; if (self.controlling) { ssl_options.socket = tcp_conn; tls_conn = tls.connect(ssl_options); @@ -1002,7 +1002,7 @@ Ice.prototype._upgrade_to_tls = function(session) { * */ Ice.prototype._handle_stun_packet = function(buffer, info) { - var method = stun.get_method_field(buffer); + const method = stun.get_method_field(buffer); if (this.closed) { return this._bad_stun_packet(buffer, info, @@ -1057,7 +1057,7 @@ Ice.prototype._handle_stun_request = function(buffer, info) { // as were communicated by the signaller. // we only reply to requests with credentials in this path, // since this is not meant to be general stun server. 
- var attr_map = stun.get_attrs_map(buffer); + const attr_map = stun.get_attrs_map(buffer); if (!this._check_stun_credentials(attr_map)) { return this._bad_stun_packet(buffer, info, 'REQUEST WITH BAD CREDENTIALS'); } @@ -1103,7 +1103,7 @@ Ice.prototype._handle_stun_request = function(buffer, info) { } // send stun response - var reply = this._make_stun_request_response(info, buffer, attr_map.use_candidate); + const reply = this._make_stun_request_response(info, buffer, attr_map.use_candidate); if (info.tcp) { info.tcp.frame_stream.send_message([reply], ICE_FRAME_STUN_MSG_TYPE); } else { @@ -1123,11 +1123,11 @@ Ice.prototype._handle_stun_request = function(buffer, info) { * */ Ice.prototype._handle_stun_response = function(buffer, info) { - var attr_map; + let attr_map; // lookup the tid in the pending requests - var tid = stun.get_tid_field(buffer).toString('base64'); - var session = this.sessions_by_tid[tid]; + const tid = stun.get_tid_field(buffer).toString('base64'); + let session = this.sessions_by_tid[tid]; // check if this is a response from stun server if (!session) { @@ -1177,7 +1177,7 @@ Ice.prototype._handle_stun_response = function(buffer, info) { if (!info.session) { info.session = session; } - var changed = false; + let changed = false; if (info.session && info.session.mark_ready()) { dbg.log3('ICE SESSION READY (RESPONDED)', session.key, this.connid); changed = true; @@ -1204,15 +1204,15 @@ Ice.prototype._handle_stun_response = function(buffer, info) { * */ Ice.prototype._add_stun_servers_candidates = function(udp) { - var self = this; + const self = this; return P.map(self.config.stun_servers, function(stun_url) { if (!stun_url) return; stun_url = _.isString(stun_url) ? url_utils.quick_parse(stun_url) : stun_url; // this request is to public server and we need to know that // when processing the response to not require it to include credentials, // while the peer stun messages will be required to include it. - var session; - var family = net.isIPv6(stun_url.hostname) ? 'IPv6' : 'IPv4'; + let session; + const family = net.isIPv6(stun_url.hostname) ? 
'IPv6' : 'IPv4'; do { // create "minimal candidates" local and remote session = new IceSession( @@ -1254,7 +1254,7 @@ Ice.prototype._check_stun_credentials = function(attr_map) { } // check the credentials match - var frags = attr_map.username.split(':', 2); + const frags = attr_map.username.split(':', 2); if (frags[0] !== this.local_credentials.ufrag || frags[1] !== this.remote_credentials.ufrag || attr_map.password !== this.local_credentials.pwd) { @@ -1294,7 +1294,7 @@ Ice.prototype._make_stun_request_response = function(info, request_buffer, use_c Ice.prototype.close = function() { - var self = this; + const self = this; if (self.closed) return; self.closed = true; self.emit('close'); @@ -1323,7 +1323,7 @@ function IceCandidate(cand) { util.inherits(IceSession, events.EventEmitter); function IceSession(local, remote, packet, udp) { - var self = this; + const self = this; events.EventEmitter.call(self); self.local = local; self.remote = remote; @@ -1387,7 +1387,7 @@ IceSession.prototype.mark_ready = function() { }; IceSession.prototype.mark_activating = function(packet) { - var self = this; + const self = this; switch (self.state) { case 'closed': throw new Error('ICE SESSION STATE CLOSED'); @@ -1451,7 +1451,7 @@ IceSession.prototype.run_udp_indication_loop = function() { this.indication = stun.new_packet(stun.METHODS.INDICATION, null, this.packet); } this.udp.send_outbound(this.indication, this.remote.port, this.remote.address, _.noop); - var delay = stun.INDICATION_INTERVAL * chance.floating(stun.INDICATION_JITTER); + const delay = stun.INDICATION_INTERVAL * chance.floating(stun.INDICATION_JITTER); setTimeout(this.run_udp_indication_loop, delay); }; @@ -1478,13 +1478,13 @@ function make_session_key(local, remote) { * @param port_range - port number or object with integers min,max */ function listen_on_port_range(port_range) { - var attempts = 0; - var max_attempts = 3; + let attempts = 0; + let max_attempts = 3; return P.fcall(try_to_listen); function try_to_listen() { - var port; - var server = net.createServer(); + let port; + const server = net.createServer(); if (typeof(port_range) === 'object') { if (typeof(port_range.min) === 'number' && typeof(port_range.max) === 'number') { @@ -1532,7 +1532,7 @@ function listen_on_port_range(port_range) { function allocate_port_in_range(port_range) { return listen_on_port_range(port_range) .then(function(server) { - var port = server.address().port; + const port = server.address().port; server.close(); return port; }); diff --git a/src/rpc/rpc.js b/src/rpc/rpc.js index ab13577ff3..72a1bdcff7 100644 --- a/src/rpc/rpc.js +++ b/src/rpc/rpc.js @@ -413,7 +413,7 @@ class RPC extends EventEmitter { * @param {RpcRequest} req */ _get_remote_address(req, options) { - var address = options.address; + let address = options.address; if (!address) { const domain = options.domain || this.api_routes[req.api.$id] || 'default'; address = this.router[domain]; @@ -421,7 +421,7 @@ class RPC extends EventEmitter { } assert(address, 'No RPC Address/Domain'); address = address.toLowerCase(); - var addr_url = this._address_to_url_cache.get(address); + let addr_url = this._address_to_url_cache.get(address); if (!addr_url) { addr_url = url_utils.quick_parse(address, true); this._address_to_url_cache.set(address, addr_url); @@ -434,7 +434,7 @@ class RPC extends EventEmitter { * @returns {RpcBaseConnection} */ _assign_connection(req, options) { - var conn = options.connection; + let conn = options.connection; if (!conn) { const addr_url = this._get_remote_address(req, 
options); conn = this._get_connection(addr_url, req.srv); @@ -465,7 +465,7 @@ class RPC extends EventEmitter { * @returns {RpcBaseConnection} */ _get_connection(addr_url, srv) { - var conn = this._connection_by_address.get(addr_url.href); + let conn = this._connection_by_address.get(addr_url.href); if (conn) { if (conn.is_closed()) { @@ -507,7 +507,7 @@ class RPC extends EventEmitter { */ _new_connection(addr_url) { dbg.log1('RPC _new_connection:', addr_url); - var conn; + let conn; switch (addr_url.protocol) { // order protocols by popularity case 'n2n:': { diff --git a/src/rpc/rpc_base_conn.js b/src/rpc/rpc_base_conn.js index de2cbe0b7f..d17abdd5c9 100644 --- a/src/rpc/rpc_base_conn.js +++ b/src/rpc/rpc_base_conn.js @@ -85,7 +85,7 @@ class RpcBaseConnection extends events.EventEmitter { this.on('message', encoded_msg => { try { - var decoded_message = this._decode_message(encoded_msg); + const decoded_message = this._decode_message(encoded_msg); this.emit('decoded_message', decoded_message); } catch (err) { dbg.error(`RPC decode message failed, got: ${err.message}`); @@ -192,7 +192,7 @@ class RpcBaseConnection extends events.EventEmitter { } _alloc_reqid() { - let reqid = this._rpc_req_seq + '@' + this.connid; + const reqid = this._rpc_req_seq + '@' + this.connid; this._rpc_req_seq += 1; return reqid; } diff --git a/src/rpc/rpc_http.js b/src/rpc/rpc_http.js index 3a099c61b9..02d92651a5 100644 --- a/src/rpc/rpc_http.js +++ b/src/rpc/rpc_http.js @@ -91,7 +91,7 @@ class RpcHttpConnection extends RpcBaseConnection { * */ send_http_response(msg, req) { - let res = this.res; + const res = this.res; if (!res) { throw new Error('HTTP RESPONSE ALREADY SENT ' + req.reqid); } @@ -112,16 +112,16 @@ class RpcHttpConnection extends RpcBaseConnection { * */ send_http_request(msg, rpc_req) { - let headers = {}; + const headers = {}; // set the url path only for logging to show it - let path = BASE_PATH + rpc_req.srv; + const path = BASE_PATH + rpc_req.srv; extract_meta_buffer(msg, headers); headers['content-length'] = _.sumBy(msg, 'length'); headers['content-type'] = 'application/json'; - let http_options = { + const http_options = { protocol: this.url.protocol, hostname: this.url.hostname, port: this.url.port, @@ -140,7 +140,7 @@ class RpcHttpConnection extends RpcBaseConnection { agent: http_utils.get_unsecured_agent(this.url.href) }; - let http_req = + const http_req = (http_options.protocol === 'https:') ? https.request(http_options) : http.request(http_options); @@ -148,7 +148,7 @@ class RpcHttpConnection extends RpcBaseConnection { dbg.log3('HTTP request', http_req.method, http_req.path, http_req._headers); - let send_defer = new P.Defer(); + const send_defer = new P.Defer(); // reject on send errors http_req.on('error', send_defer.reject); diff --git a/src/rpc/rpc_http_server.js b/src/rpc/rpc_http_server.js index f105ec905c..cd9f53de16 100644 --- a/src/rpc/rpc_http_server.js +++ b/src/rpc/rpc_http_server.js @@ -74,11 +74,11 @@ class RpcHttpServer extends events.EventEmitter { return; } - let host = req.connection.remoteAddress; - let port = req.connection.remotePort; - let proto = req.connection.ssl ? 'https' : 'http'; - let address = proto + '://' + host + ':' + port; - let conn = new RpcHttpConnection(url_utils.quick_parse(address)); + const host = req.connection.remoteAddress; + const port = req.connection.remotePort; + const proto = req.connection.ssl ? 
'https' : 'http'; + const address = proto + '://' + host + ':' + port; + const conn = new RpcHttpConnection(url_utils.quick_parse(address)); conn.req = req; conn.res = res; conn.emit('connect'); diff --git a/src/rpc/rpc_n2n.js b/src/rpc/rpc_n2n.js index e78060c2f7..078dca14c2 100644 --- a/src/rpc/rpc_n2n.js +++ b/src/rpc/rpc_n2n.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -// let _ = require('lodash'); -let P = require('../util/promise'); -let dbg = require('../util/debug_module')(__filename); -let RpcBaseConnection = require('./rpc_base_conn'); -let Ice = require('./ice'); +// const _ = require('lodash'); +const P = require('../util/promise'); +const dbg = require('../util/debug_module')(__filename); +const RpcBaseConnection = require('./rpc_base_conn'); +const Ice = require('./ice'); /** * @@ -23,7 +23,7 @@ class RpcN2NConnection extends RpcBaseConnection { this.ice = new Ice(this.connid, n2n_agent.n2n_config, this.url.href); this.ice.on('close', () => { - let closed_err = new Error('N2N ICE CLOSED'); + const closed_err = new Error('N2N ICE CLOSED'); closed_err.stack = ''; this.emit('error', closed_err); }); @@ -31,7 +31,7 @@ class RpcN2NConnection extends RpcBaseConnection { this.ice.on('error', err => this.emit('error', err)); this.reset_n2n_listener = () => { - let reset_err = new Error('N2N RESET'); + const reset_err = new Error('N2N RESET'); reset_err.stack = ''; this.emit('error', reset_err); }; diff --git a/src/rpc/rpc_n2n_agent.js b/src/rpc/rpc_n2n_agent.js index 605b64f7ca..af153a437e 100644 --- a/src/rpc/rpc_n2n_agent.js +++ b/src/rpc/rpc_n2n_agent.js @@ -59,10 +59,10 @@ class RpcN2NAgent extends EventEmitter { // send_signal is function(info) that sends over a signal channel // and delivers the info to info.target, // and returns back the info that was returned by the peer. - let send_signal = options.send_signal; + const send_signal = options.send_signal; // lazy loading of nb_native to use Nudp - let Nudp = nb_native().Nudp; + const Nudp = nb_native().Nudp; // initialize the default config structure this.n2n_config = { @@ -107,7 +107,7 @@ class RpcN2NAgent extends EventEmitter { // callback to create and bind nudp socket // TODO implement nudp dtls udp_socket: (udp_port, dtls) => { - let nudp = new Nudp(); + const nudp = new Nudp(); return P.ninvoke(nudp, 'bind', 0, '0.0.0.0').then(port => { nudp.port = port; return nudp; @@ -166,7 +166,7 @@ class RpcN2NAgent extends EventEmitter { // emit 'reset_n2n' to notify all existing connections to close this.emit('reset_n2n'); - let remaining_listeners = this.listenerCount('reset_n2n'); + const remaining_listeners = this.listenerCount('reset_n2n'); if (remaining_listeners) { dbg.warn('update_n2n_config: remaining listeners on reset_n2n event', remaining_listeners, '(probably a connection that forgot to call close)'); @@ -174,7 +174,7 @@ class RpcN2NAgent extends EventEmitter { } disconnect() { - let conf = this.n2n_config.tcp_permanent_passive; + const conf = this.n2n_config.tcp_permanent_passive; if (conf.server) { dbg.log0('close tcp_permanent_passive old server'); conf.server.close(); @@ -198,14 +198,14 @@ class RpcN2NAgent extends EventEmitter { // target address is me, source is you. 
// the special case if rpc_address='n2n://*' allows testing code to accept for any target - let source = url_utils.quick_parse(params.source); - let target = url_utils.quick_parse(params.target); + const source = url_utils.quick_parse(params.source); + const target = url_utils.quick_parse(params.target); if (!this.rpc_address || !target || (this.rpc_address !== N2N_STAR && this.rpc_address !== target.href)) { throw new Error('N2N MISMATCHING PEER ID ' + params.target + ' my rpc_address ' + this.rpc_address); } - let conn = new RpcN2NConnection(source, this); + const conn = new RpcN2NConnection(source, this); conn.once('connect', () => this.emit('connection', conn)); return conn.accept(params.info); } diff --git a/src/rpc/rpc_ntcp.js b/src/rpc/rpc_ntcp.js index 86e66e13c5..d9aa24ceb7 100644 --- a/src/rpc/rpc_ntcp.js +++ b/src/rpc/rpc_ntcp.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -// let _ = require('lodash'); -// let P = require('../util/promise'); -let RpcBaseConnection = require('./rpc_base_conn'); -let nb_native = require('../util/nb_native'); -// let dbg = require('../util/debug_module')(__filename); +// const _ = require('lodash'); +// const P = require('../util/promise'); +const RpcBaseConnection = require('./rpc_base_conn'); +const nb_native = require('../util/nb_native'); +// const dbg = require('../util/debug_module')(__filename); /** @@ -23,7 +23,7 @@ class RpcNtcpConnection extends RpcBaseConnection { * */ _connect() { - let Ntcp = nb_native().Ntcp; + const Ntcp = nb_native().Ntcp; this.ntcp = new Ntcp(); this.ntcp.connect(this.url.port, this.url.hostname, () => this.emit('connect')); @@ -51,9 +51,9 @@ class RpcNtcpConnection extends RpcBaseConnection { } _init_tcp() { - let ntcp = this.ntcp; + const ntcp = this.ntcp; ntcp.on('close', () => { - let closed_err = new Error('TCP CLOSED'); + const closed_err = new Error('TCP CLOSED'); closed_err.stack = ''; this.emit('error', closed_err); }); diff --git a/src/rpc/rpc_ntcp_server.js b/src/rpc/rpc_ntcp_server.js index 6724cb02a2..208abe3ed7 100644 --- a/src/rpc/rpc_ntcp_server.js +++ b/src/rpc/rpc_ntcp_server.js @@ -19,7 +19,7 @@ class RpcNtcpServer extends EventEmitter { constructor(tls_options) { super(); this.protocol = (tls_options ? 
'ntls:' : 'ntcp:'); - let Ntcp = nb_native().Ntcp; + const Ntcp = nb_native().Ntcp; this.server = new Ntcp(); this.server.on('connection', ntcp => this._on_connection(ntcp)); this.server.on('close', err => { @@ -58,8 +58,8 @@ class RpcNtcpServer extends EventEmitter { hostname: ntcp.remoteAddress, port: ntcp.remotePort }); - let addr_url = url.parse(address); - let conn = new RpcNtcpConnection(addr_url); + const addr_url = url.parse(address); + const conn = new RpcNtcpConnection(addr_url); dbg.log0('NTCP ACCEPT CONNECTION', conn.connid + ' ' + conn.url.href); conn.ntcp = ntcp; conn._init_tcp(); diff --git a/src/rpc/rpc_nudp.js b/src/rpc/rpc_nudp.js index e722736cdc..2e09308432 100644 --- a/src/rpc/rpc_nudp.js +++ b/src/rpc/rpc_nudp.js @@ -1,14 +1,14 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -// let _ = require('lodash'); -let P = require('../util/promise'); -// let url = require('url'); -let RpcBaseConnection = require('./rpc_base_conn'); -let nb_native = require('../util/nb_native'); -let stun = require('./stun'); -// let promise_utils = require('../util/promise_utils'); -// let dbg = require('../util/debug_module')(__filename); +// const _ = require('lodash'); +const P = require('../util/promise'); +// const url = require('url'); +const RpcBaseConnection = require('./rpc_base_conn'); +const nb_native = require('../util/nb_native'); +const stun = require('./stun'); +// const promise_utils = require('../util/promise_utils'); +// const dbg = require('../util/debug_module')(__filename); /** * @@ -20,7 +20,7 @@ class RpcNudpConnection extends RpcBaseConnection { // constructor(addr_url) { super(addr_url); } _connect() { - let Nudp = nb_native().Nudp; + const Nudp = nb_native().Nudp; this.nudp = new Nudp(); this._init_nudp(); return P.ninvoke(this.nudp, 'bind', 0, '0.0.0.0') @@ -42,7 +42,7 @@ class RpcNudpConnection extends RpcBaseConnection { } accept(port) { - let Nudp = nb_native().Nudp; + const Nudp = nb_native().Nudp; this.nudp = new Nudp(); this._init_nudp(); return P.ninvoke(this.nudp, 'bind', port, '0.0.0.0') @@ -53,7 +53,7 @@ class RpcNudpConnection extends RpcBaseConnection { } _init_nudp() { - let nudp = this.nudp; + const nudp = this.nudp; nudp.on('close', () => this.emit('error', new Error('NUDP CLOSED'))); nudp.on('error', err => this.emit('error', err)); nudp.on('message', msg => this.emit('message', [msg])); diff --git a/src/rpc/rpc_schema.js b/src/rpc/rpc_schema.js index e24805dbda..28379e583e 100644 --- a/src/rpc/rpc_schema.js +++ b/src/rpc/rpc_schema.js @@ -111,7 +111,7 @@ class RpcSchema { } method_api.validate_params = (params, desc) => { - let result = method_api.params_validator(params); + const result = method_api.params_validator(params); if (!result) { dbg.error('INVALID_SCHEMA_PARAMS', desc, method_api.fullname, 'ERRORS:', util.inspect(method_api.params_validator.errors, true, null, true), @@ -121,7 +121,7 @@ class RpcSchema { }; method_api.validate_reply = (reply, desc) => { - let result = method_api.reply_validator(reply); + const result = method_api.reply_validator(reply); if (!result) { dbg.error('INVALID_SCHEMA_REPLY', desc, method_api.fullname, 'ERRORS:', util.inspect(method_api.reply_validator.errors, true, null, true), diff --git a/src/rpc/rpc_tcp.js b/src/rpc/rpc_tcp.js index 300b8014e3..98bdcd9bb0 100644 --- a/src/rpc/rpc_tcp.js +++ b/src/rpc/rpc_tcp.js @@ -68,10 +68,10 @@ class RpcTcpConnection extends RpcBaseConnection { } _init_tcp() { - let tcp_conn = this.tcp_conn; + const tcp_conn = this.tcp_conn; tcp_conn.on('close', () => { - let closed_err = 
new Error('TCP CLOSED'); + const closed_err = new Error('TCP CLOSED'); closed_err.stack = ''; this.emit('error', closed_err); }); @@ -79,7 +79,7 @@ class RpcTcpConnection extends RpcBaseConnection { tcp_conn.on('error', err => this.emit('error', err)); tcp_conn.on('timeout', () => { - let timeout_err = new Error('TCP IDLE TIMEOUT'); + const timeout_err = new Error('TCP IDLE TIMEOUT'); timeout_err.stack = ''; this.emit('error', timeout_err); }); diff --git a/src/rpc/rpc_tcp_server.js b/src/rpc/rpc_tcp_server.js index be3b9617f5..ea4282878f 100644 --- a/src/rpc/rpc_tcp_server.js +++ b/src/rpc/rpc_tcp_server.js @@ -1,14 +1,14 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -// let _ = require('lodash'); -// let P = require('../util/promise'); -let net = require('net'); -let tls = require('tls'); -let url = require('url'); -let events = require('events'); -let RpcTcpConnection = require('./rpc_tcp'); -let dbg = require('../util/debug_module')(__filename); +// const _ = require('lodash'); +// const P = require('../util/promise'); +const net = require('net'); +const tls = require('tls'); +const url = require('url'); +const events = require('events'); +const RpcTcpConnection = require('./rpc_tcp'); +const dbg = require('../util/debug_module')(__filename); /** * @@ -66,8 +66,8 @@ class RpcTcpServer extends events.EventEmitter { hostname: tcp_conn.remoteAddress, port: tcp_conn.remotePort }); - let addr_url = url.parse(address); - let conn = new RpcTcpConnection(addr_url); + const addr_url = url.parse(address); + const conn = new RpcTcpConnection(addr_url); dbg.log0('TCP ACCEPT CONNECTION', conn.connid + ' ' + conn.url.href); conn.tcp_conn = tcp_conn; conn._init_tcp(); diff --git a/src/rpc/stun.js b/src/rpc/stun.js index bf718b6805..92cafcde6c 100644 --- a/src/rpc/stun.js +++ b/src/rpc/stun.js @@ -132,9 +132,9 @@ _.each(stun.PUBLIC_SERVERS, function(stun_url) { * detect stun packet according to header first byte */ function is_stun_packet(buffer) { - var block = buffer.readUInt8(0); - var bit1 = block & 0x80; - var bit2 = block & 0x40; + const block = buffer.readUInt8(0); + const bit1 = block & 0x80; + const bit2 = block & 0x40; return bit1 === 0 && bit2 === 0; } @@ -142,8 +142,8 @@ function is_stun_packet(buffer) { * create and initialize a new stun packet buffer */ function new_packet(method_code, attrs, req_buffer) { - var attrs_len = attrs ? encoded_attrs_len(attrs) : 0; - var buffer = Buffer.alloc(stun.HEADER_LENGTH + attrs_len); + const attrs_len = attrs ? encoded_attrs_len(attrs) : 0; + const buffer = Buffer.alloc(stun.HEADER_LENGTH + attrs_len); set_method_field(buffer, method_code); set_attrs_len_field(buffer, attrs_len); set_magic_and_tid_field(buffer, req_buffer); @@ -164,7 +164,7 @@ function get_method_field(buffer) { * decode the stun method field */ function get_method_name(buffer) { - var code = get_method_field(buffer); + const code = get_method_field(buffer); return stun.METHOD_NAMES[code]; } @@ -173,7 +173,7 @@ function get_method_name(buffer) { * set binding class which is the only option for stun. 
*/ function set_method_field(buffer, method_code) { - var val = stun.BINDING_TYPE | (method_code & stun.METHOD_MASK); + const val = stun.BINDING_TYPE | (method_code & stun.METHOD_MASK); buffer.writeUInt16BE(val, 0); } @@ -185,7 +185,7 @@ function set_method_name(buffer, method_name) { if (!(method_name in stun.METHODS)) { throw new Error('bad stun method'); } - var method_code = stun.METHODS[method_name]; + const method_code = stun.METHODS[method_name]; set_method_field(buffer, method_code); } @@ -229,8 +229,8 @@ function get_tid_field(buffer) { * dup keys will be overriden by last value. */ function get_attrs_map(buffer) { - var attrs = decode_attrs(buffer); - var map = {}; + const attrs = decode_attrs(buffer); + const map = {}; _.each(attrs, function(attr) { switch (attr.type) { case stun.ATTRS.XOR_MAPPED_ADDRESS: @@ -271,9 +271,9 @@ function decode_attrs(buffer) { throw new Error('STUN PACKET TOO LONG, dropping buffer ' + buffer.length); } - var attrs = []; - var offset = stun.HEADER_LENGTH; - var end = offset + get_attrs_len_field(buffer); + const attrs = []; + let offset = stun.HEADER_LENGTH; + const end = offset + get_attrs_len_field(buffer); while (offset < end) { @@ -283,9 +283,9 @@ function decode_attrs(buffer) { throw new Error('STUN PACKET TOO MANY ATTRS dropping buffer ' + buffer.length); } - var type = buffer.readUInt16BE(offset); + const type = buffer.readUInt16BE(offset); offset += 2; - var length = buffer.readUInt16BE(offset); + const length = buffer.readUInt16BE(offset); offset += 2; if (length > 256) { @@ -293,8 +293,8 @@ function decode_attrs(buffer) { ' length=' + length + ' dropping buffer ' + buffer.length); } - var next = offset + length; - var value; + const next = offset + length; + let value; switch (type) { case stun.ATTRS.MAPPED_ADDRESS: case stun.ATTRS.RESPONSE_ADDRESS: @@ -343,8 +343,8 @@ function decode_attrs(buffer) { * */ function encoded_attrs_len(attrs) { - var len = 0; - for (var i = 0; i < attrs.length; ++i) { + let len = 0; + for (let i = 0; i < attrs.length; ++i) { if (!attrs[i]) continue; // every attr requires type and len 16bit each len = align_offset(len + 4 + encoded_attr_len(attrs[i])); @@ -383,16 +383,16 @@ function encoded_attr_len(attr) { * */ function encode_attrs(buffer, attrs) { - var offset = stun.HEADER_LENGTH; - for (var i = 0; i < attrs.length; ++i) { - var attr = attrs[i]; + let offset = stun.HEADER_LENGTH; + for (let i = 0; i < attrs.length; ++i) { + const attr = attrs[i]; if (!attr) continue; buffer.writeUInt16BE(attr.type, offset); offset += 2; - var length = encoded_attr_len(attr); + const length = encoded_attr_len(attr); buffer.writeUInt16BE(length, offset); offset += 2; - var next = offset + length; + const next = offset + length; switch (attr.type) { case stun.ATTRS.MAPPED_ADDRESS: @@ -433,9 +433,9 @@ function encode_attrs(buffer, attrs) { * though XOR-MAPPED-ADDRESS is preferred to avoid routers messing with it */ function decode_attr_mapped_addr(buffer, start, end) { - var family = (buffer.readUInt16BE(start) === 0x02) ? 6 : 4; - var port = buffer.readUInt16BE(start + 2); - var address = ip_module.toString(buffer, start + 4, family); + const family = (buffer.readUInt16BE(start) === 0x02) ? 6 : 4; + const port = buffer.readUInt16BE(start + 2); + const address = ip_module.toString(buffer, start + 4, family); return { family: 'IPv' + family, @@ -449,21 +449,21 @@ function decode_attr_mapped_addr(buffer, start, end) { * this is the main reply to stun request. 
*/ function decode_attr_xor_mapped_addr(buffer, start, end) { - var family = (buffer.readUInt16BE(start) === 0x02) ? 6 : 4; + const family = (buffer.readUInt16BE(start) === 0x02) ? 6 : 4; // xor the port against the magic key - var port = buffer.readUInt16BE(start + 2) ^ + const port = buffer.readUInt16BE(start + 2) ^ buffer.readUInt16BE(stun.XOR_KEY_OFFSET); // xor the address against magic key and tid - var addr_buf = buffer.slice(start + 4, end); - var xor_buf = Buffer.allocUnsafe(addr_buf.length); - var k = stun.XOR_KEY_OFFSET; - for (var i = 0; i < xor_buf.length; ++i) { + const addr_buf = buffer.slice(start + 4, end); + const xor_buf = Buffer.allocUnsafe(addr_buf.length); + let k = stun.XOR_KEY_OFFSET; + for (let i = 0; i < xor_buf.length; ++i) { xor_buf[i] = addr_buf[i] ^ buffer[k]; k += 1; } - var address = ip_module.toString(xor_buf, 0, family); + const address = ip_module.toString(xor_buf, 0, family); return { family: 'IPv' + family, @@ -476,9 +476,9 @@ function decode_attr_xor_mapped_addr(buffer, start, end) { * decode ERROR-CODE attribute */ function decode_attr_error_code(buffer, start, end) { - var block = buffer.readUInt32BE(start); - var code = ((block & 0x700) * 100) + block & 0xff; - var reason = buffer.readUInt32BE(start + 4); + const block = buffer.readUInt32BE(start); + const code = ((block & 0x700) * 100) + block & 0xff; + const reason = buffer.readUInt32BE(start + 4); return { code: code, reason: reason @@ -489,8 +489,8 @@ function decode_attr_error_code(buffer, start, end) { * decode UNKNOWN-ATTRIBUTES attribute */ function decode_attr_unknown_attr(buffer, start, end) { - var unknown_attrs = []; - var offset = start; + const unknown_attrs = []; + let offset = start; while (offset < end) { unknown_attrs.push(buffer.readUInt16BE(offset)); offset += 2; @@ -525,8 +525,8 @@ function encode_attr_xor_mapped_addr(addr, buffer, offset, end) { buffer.writeUInt16BE(addr.port ^ buffer.readUInt16BE(stun.XOR_KEY_OFFSET), offset + 2); ip_module.toBuffer(addr.address, buffer, offset + 4); - var k = stun.XOR_KEY_OFFSET; - for (var i = offset + 4; i < end; ++i) { + let k = stun.XOR_KEY_OFFSET; + for (let i = offset + 4; i < end; ++i) { buffer[i] ^= buffer[k]; k += 1; } @@ -538,7 +538,7 @@ function encode_attr_xor_mapped_addr(addr, buffer, offset, end) { */ function encode_attr_error_code(err, buffer, start, end) { // eslint-disable-next-line no-bitwise - var code = (((err.code / 100) | 0) << 8) | ((err.code % 100) & 0xff); + const code = (((err.code / 100) | 0) << 8) | ((err.code % 100) & 0xff); buffer.writeUInt32BE(code, start); buffer.writeUInt32BE(err.reason, start + 4); } @@ -548,7 +548,7 @@ function encode_attr_error_code(err, buffer, start, end) { * offsets are aligned up to 4 bytes */ function align_offset(offset) { - var rem = offset % 4; + const rem = offset % 4; if (rem) { return offset + 4 - rem; } else { @@ -560,9 +560,9 @@ function align_offset(offset) { * */ function test() { - var argv = require('minimist')(process.argv); // eslint-disable-line global-require - var socket = dgram.createSocket('udp4'); - var stun_url = stun.PUBLIC_SERVERS[0]; + const argv = require('minimist')(process.argv); // eslint-disable-line global-require + const socket = dgram.createSocket('udp4'); + let stun_url = stun.PUBLIC_SERVERS[0]; if (argv.stun_host) { stun_url = { hostname: argv.stun_host, @@ -578,7 +578,7 @@ function test() { return; } console.log('STUN', get_method_name(buffer), 'from', rinfo.address + ':' + rinfo.port); - var attrs = decode_attrs(buffer); + const attrs = 
decode_attrs(buffer); _.each(attrs, function(attr) { console.log(' *', attr.attr, @@ -586,9 +586,9 @@ function test() { '[len ' + attr.length + ']', util.inspect(attr.value, { depth: null })); }); - var method = get_method_field(buffer); + const method = get_method_field(buffer); if (method === stun.METHODS.REQUEST) { - var reply = new_packet(stun.METHODS.SUCCESS, [{ + const reply = new_packet(stun.METHODS.SUCCESS, [{ type: stun.ATTRS.XOR_MAPPED_ADDRESS, value: { family: 'IPv4', @@ -605,8 +605,8 @@ function test() { } }) .then(function() { - var req = new_packet(stun.METHODS.REQUEST); - var ind = new_packet(stun.METHODS.INDICATION); + const req = new_packet(stun.METHODS.REQUEST); + const ind = new_packet(stun.METHODS.INDICATION); return loop(); function loop() { diff --git a/src/sdk/dedup_options.js b/src/sdk/dedup_options.js index ef0e65d6f5..a1beffa6db 100644 --- a/src/sdk/dedup_options.js +++ b/src/sdk/dedup_options.js @@ -1,10 +1,10 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var js_utils = require('../util/js_utils'); +const js_utils = require('../util/js_utils'); // DO NOT CHANGE UNLESS YOU *KNOW* RABIN CHUNKING -var dedup_config = js_utils.deep_freeze({ +const dedup_config = js_utils.deep_freeze({ // min_chunk bytes are skipped before looking for new boundary. // max_chunk is the length which will be chunked if not chunked by context. diff --git a/src/sdk/namespace_blob.js b/src/sdk/namespace_blob.js index 58740db38d..85213950c0 100644 --- a/src/sdk/namespace_blob.js +++ b/src/sdk/namespace_blob.js @@ -567,7 +567,7 @@ class NamespaceBlob { const obj_id = Buffer.from(params.obj_id, 'base64').toString(); if (schema_utils.is_object_id(obj_id)) { - let obj_md = await object_sdk.rpc_client.object.read_object_md({ + const obj_md = await object_sdk.rpc_client.object.read_object_md({ obj_id, bucket: params.bucket, key: params.key diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js index 4135777f58..03edf4f1e7 100644 --- a/src/sdk/namespace_fs.js +++ b/src/sdk/namespace_fs.js @@ -93,7 +93,7 @@ async function is_directory_or_symlink_to_directory(stat, fs_context, entry_path try { let r = isDirectory(stat); if (!r && is_symbolic_link(stat)) { - let targetStat = await nb_native().fs.stat(fs_context, entry_path); + const targetStat = await nb_native().fs.stat(fs_context, entry_path); if (!targetStat) throw new Error('is_directory_or_symlink_to_directory: targetStat is empty'); r = isDirectory(targetStat); } @@ -595,7 +595,7 @@ class NamespaceFS { let num_bytes = 0; let num_buffers = 0; - let log2_size_histogram = {}; + const log2_size_histogram = {}; let drain_promise = null; dbg.log0('NamespaceFS: read_object_stream', { file_path, start, end }); @@ -759,7 +759,7 @@ class NamespaceFS { } let open_path = upload_path || file_path; - let copy_res = params.copy_source && (await this._try_copy_file(fs_context, params, file_path, upload_path)); + const copy_res = params.copy_source && (await this._try_copy_file(fs_context, params, file_path, upload_path)); if (copy_res) { if (copy_res === copy_status_enum.FALLBACK) { params.copy_source.nsfs_copy_fallback(); @@ -884,7 +884,7 @@ class NamespaceFS { async _wrap_safe_op_with_retries(fs_context, handler, params, retry_err_msg, success_err_codes) { let retries = config.NSFS_RENAME_RETRIES; - let { from_path, to_path = undefined, mtimeNsBigint, ino } = params; + const { from_path, to_path = undefined, mtimeNsBigint, ino } = params; for (;;) { try { dbg.log1('Namespace_fs.wrap_safe_with_retries: ', handler, fs_context, from_path, to_path, 
mtimeNsBigint, ino); @@ -1125,7 +1125,7 @@ class NamespaceFS { const fs_context = this.prepare_fs_context(object_sdk); const open_mode = 'w'; try { - let MD5Async = config.NSFS_CALCULATE_MD5 ? new (nb_native().crypto.MD5Async)() : undefined; + const MD5Async = config.NSFS_CALCULATE_MD5 ? new (nb_native().crypto.MD5Async)() : undefined; const { multiparts = [] } = params; multiparts.sort((a, b) => a.num - b.num); await this._load_multipart(params, fs_context); @@ -1594,10 +1594,10 @@ class NamespaceFS { } async _folder_delete(dir, fs_context) { - let entries = await nb_native().fs.readdir(fs_context, dir); - let results = await Promise.all(entries.map(entry => { - let fullPath = path.join(dir, entry.name); - let task = isDirectory(entry) ? this._folder_delete(fullPath, fs_context) : + const entries = await nb_native().fs.readdir(fs_context, dir); + const results = await Promise.all(entries.map(entry => { + const fullPath = path.join(dir, entry.name); + const task = isDirectory(entry) ? this._folder_delete(fullPath, fs_context) : nb_native().fs.unlink(fs_context, fullPath); return task.catch(error => ({ error })); })); @@ -1674,7 +1674,7 @@ class NamespaceFS { try { // Returns the real path of the entry. // The entry path may point to regular file or directory, but can have symbolic links - let full_path = await nb_native().fs.realpath(fs_context, entry_path); + const full_path = await nb_native().fs.realpath(fs_context, entry_path); if (!full_path.startsWith(this.bucket_path)) { dbg.log0('check_bucket_boundaries: the path', entry_path, 'is not in the bucket', this.bucket_path, 'boundaries'); return false; @@ -1835,7 +1835,7 @@ class NamespaceFS { // 1.2 if version exists - unlink version // 2. try promote second latest to latest if one of the deleted versions is the latest version (with version id specified) or a delete marker async _delete_objects_versioning_enabled(fs_context, key, versions) { - let res = []; + const res = []; let deleted_delete_marker = false; let delete_marker_created = false; let latest_ver_info; diff --git a/src/sdk/namespace_merge.js b/src/sdk/namespace_merge.js index 730328e7e3..38722b6542 100644 --- a/src/sdk/namespace_merge.js +++ b/src/sdk/namespace_merge.js @@ -259,7 +259,7 @@ class NamespaceMerge { _handle_single_namespace_deletes(params) { - let response = []; + const response = []; const { deleted_ns, head_ns } = params; for (let i = 0; i < deleted_ns.length; ++i) { const res = deleted_ns[i]; @@ -424,10 +424,10 @@ class NamespaceMerge { // Which is not what we wanted since we want to see all of the versions _handle_list(res, params) { if (res.length === 1) return res[0]; - var i; - var j; + let i; + let j; const map = {}; - var is_truncated; + let is_truncated; for (i = 0; i < res.length; ++i) { for (j = 0; j < res[i].objects.length; ++j) { const obj = res[i].objects[j]; @@ -486,9 +486,9 @@ class NamespaceMerge { 'NoSuchBucket': S3Error.NoSuchBucket, 'ContainerNotFound': S3Error.NoSuchBucket, }; - let exist = err_to_s3err_map[err.code]; + const exist = err_to_s3err_map[err.code]; if (!exist) return err; - let s3error = new S3Error(exist); + const s3error = new S3Error(exist); s3error.message = err.message; return s3error; } diff --git a/src/sdk/namespace_multipart.js b/src/sdk/namespace_multipart.js index fc4cce91a3..3c5b61357a 100644 --- a/src/sdk/namespace_multipart.js +++ b/src/sdk/namespace_multipart.js @@ -139,7 +139,7 @@ class NamespaceMultipart { } _ns_get(func) { - var i = -1; + let i = -1; const try_next = err => { i += 1; if (i >= 
this.total_resources.length) { @@ -186,10 +186,10 @@ class NamespaceMultipart { _handle_list(res, params) { res = this._throw_if_all_failed_or_get_succeeded(res); if (res.length === 1) return res[0]; - var i; - var j; + let i; + let j; const map = {}; - var is_truncated; + let is_truncated; for (i = 0; i < res.length; ++i) { for (j = 0; j < res[i].objects.length; ++j) { const obj = res[i].objects[j]; diff --git a/src/sdk/object_io.js b/src/sdk/object_io.js index 4573abb886..e74cbd8449 100644 --- a/src/sdk/object_io.js +++ b/src/sdk/object_io.js @@ -765,7 +765,7 @@ function slice_buffers_in_range(chunks, start, end) { const buffers = []; for (const chunk of chunks) { const part = chunk.parts[0]; - let part_range = range_utils.intersection(part.start, part.end, pos, end); + const part_range = range_utils.intersection(part.start, part.end, pos, end); if (!part_range) { if (end <= part.start) { // --- start ------------- pos --------- end --- diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js index 0da0262c32..ab220d8709 100644 --- a/src/sdk/object_sdk.js +++ b/src/sdk/object_sdk.js @@ -333,7 +333,7 @@ class ObjectSDK { * @returns {nb.Namespace} */ _setup_merge_namespace(bucket) { - let rr = _.cloneDeep(bucket.namespace.read_resources); + const rr = _.cloneDeep(bucket.namespace.read_resources); /** @type {nb.Namespace} */ let wr = bucket.namespace.write_resource && this._setup_single_namespace(bucket.namespace.write_resource); diff --git a/src/server/analytic_services/activity_log_store.js b/src/server/analytic_services/activity_log_store.js index 6876161e89..c0411730b9 100644 --- a/src/server/analytic_services/activity_log_store.js +++ b/src/server/analytic_services/activity_log_store.js @@ -46,7 +46,7 @@ class ActivityLogStore { read_activity_log(query) { const { skip = 0, limit = 100 } = query; - let selector = this._create_selector(query); + const selector = this._create_selector(query); return P.resolve().then(async () => this._activitylogs.find(selector, { skip, limit, sort: { time: -1 } })); } diff --git a/src/server/analytic_services/prometheus_reports/noobaa_core_report.js b/src/server/analytic_services/prometheus_reports/noobaa_core_report.js index d9c0618bf3..cf2d312f4a 100644 --- a/src/server/analytic_services/prometheus_reports/noobaa_core_report.js +++ b/src/server/analytic_services/prometheus_reports/noobaa_core_report.js @@ -512,7 +512,7 @@ class NooBaaCoreReport extends BasePrometheusReport { set_providers_physical_logical(providers_stats) { if (!this._metrics) return; - for (let [type, value] of Object.entries(providers_stats)) { + for (const [type, value] of Object.entries(providers_stats)) { const { logical_size, physical_size } = value; this._metrics.providers_physical_size.set({ type }, physical_size); this._metrics.providers_logical_size.set({ type }, logical_size); diff --git a/src/server/bg_services/bucket_chunks_builder.js b/src/server/bg_services/bucket_chunks_builder.js index d3bde65422..30b8ab97a0 100644 --- a/src/server/bg_services/bucket_chunks_builder.js +++ b/src/server/bg_services/bucket_chunks_builder.js @@ -33,7 +33,7 @@ class BucketChunksBuilder { // run iteration for given buckets array. 
buckets can change between iterations async run_batch(buckets) { - let res = { successful: true, chunk_ids: [] }; + const res = { successful: true, chunk_ids: [] }; if (!buckets || !buckets.length) { return res; } diff --git a/src/server/bg_services/cluster_hb.js b/src/server/bg_services/cluster_hb.js index 5c79ebb109..f5651f3b37 100644 --- a/src/server/bg_services/cluster_hb.js +++ b/src/server/bg_services/cluster_hb.js @@ -25,11 +25,11 @@ exports.do_heartbeat = do_heartbeat; * */ function do_heartbeat({ skip_server_monitor } = {}) { - let current_clustering = system_store.get_local_cluster_info(); + const current_clustering = system_store.get_local_cluster_info(); let server_below_min_req = false; let server_name; if (current_clustering) { - let heartbeat = { + const heartbeat = { version: pkg.version, time: Date.now(), health: { @@ -54,7 +54,7 @@ function do_heartbeat({ skip_server_monitor } = {}) { os_utils.get_raw_storage() ])) .then(([drives, raw_storage]) => { - let root = drives.find(drive => drive.mount === '/'); + const root = drives.find(drive => drive.mount === '/'); if (root) { root.storage.total = raw_storage; } @@ -66,12 +66,12 @@ function do_heartbeat({ skip_server_monitor } = {}) { if (info.storage) { heartbeat.health.storage = info.storage; } - let update = { + const update = { _id: current_clustering._id, heartbeat: heartbeat }; //Check if server is below minimum requirements - let min_requirements = clustering_utils.get_min_requirements(); + const min_requirements = clustering_utils.get_min_requirements(); if (info.storage.total < min_requirements.storage || heartbeat.health.os_info.totalmem < min_requirements.ram || heartbeat.health.os_info.cpus.length < min_requirements.cpu_count) { @@ -98,7 +98,7 @@ function do_heartbeat({ skip_server_monitor } = {}) { }) .then(() => { if (server_below_min_req) { - let name = server_name + '-' + current_clustering.owner_secret; + const name = server_name + '-' + current_clustering.owner_secret; return Dispatcher.instance().alert('MAJOR', system_store.data.systems[0]._id, `Server ${name} configuration is below minimum requirements. 
This can result in overall performance issues, diff --git a/src/server/bg_services/cluster_master.js b/src/server/bg_services/cluster_master.js index c8b3da7b37..098fd7aa62 100644 --- a/src/server/bg_services/cluster_master.js +++ b/src/server/bg_services/cluster_master.js @@ -10,7 +10,7 @@ const bg_workers = require('../bg_workers'); // const auth_server = require('../common_services/auth_server'); const cutil = require('../utils/clustering_utils'); -var is_cluster_master = false; +let is_cluster_master = false; exports.background_worker = background_worker; diff --git a/src/server/bg_services/db_cleaner.js b/src/server/bg_services/db_cleaner.js index ad3a03334f..ddb857ecc6 100644 --- a/src/server/bg_services/db_cleaner.js +++ b/src/server/bg_services/db_cleaner.js @@ -30,7 +30,7 @@ async function background_worker() { if (!system || system_utils.system_in_maintenance(system._id)) return; const now = Date.now(); - let last_date_to_remove = now - config.DB_CLEANER.BACK_TIME; + const last_date_to_remove = now - config.DB_CLEANER.BACK_TIME; if (this.last_check && now - this.last_check < config.DB_CLEANER.CYCLE) return config.DB_CLEANER.CYCLE; this.last_check = now; const { from_time } = md_aggregator.find_minimal_range({ diff --git a/src/server/bg_services/md_aggregator.js b/src/server/bg_services/md_aggregator.js index 914d0cbbcf..8958af7ef0 100644 --- a/src/server/bg_services/md_aggregator.js +++ b/src/server/bg_services/md_aggregator.js @@ -499,10 +499,10 @@ function aggregate_by_content_type({ function get_hist_array_from_aggregate(agg, key) { const key_prefix = key + '_pow2_'; - let bins_arr = []; - for (var prop in agg) { + const bins_arr = []; + for (const prop in agg) { if (prop.startsWith(key_prefix)) { - let index = parseInt(prop.replace(key_prefix, ''), 10); + const index = parseInt(prop.replace(key_prefix, ''), 10); bins_arr[index] = agg[prop]; } } @@ -511,13 +511,13 @@ function get_hist_array_from_aggregate(agg, key) { function build_objects_hist(bucket, existing_agg, deleted_agg) { // get the current histogram from DB - let current_objects_hist = (bucket.storage_stats && bucket.storage_stats.objects_hist) || []; + const current_objects_hist = (bucket.storage_stats && bucket.storage_stats.objects_hist) || []; // get the latest additions\deletions in an array form - let existing_size_hist = get_hist_array_from_aggregate(existing_agg[bucket._id], 'size'); - let deleted_size_hist = get_hist_array_from_aggregate(deleted_agg[bucket._id], 'size'); - let existing_count_hist = get_hist_array_from_aggregate(existing_agg[bucket._id], 'count'); - let deleted_count_hist = get_hist_array_from_aggregate(deleted_agg[bucket._id], 'count'); + const existing_size_hist = get_hist_array_from_aggregate(existing_agg[bucket._id], 'size'); + const deleted_size_hist = get_hist_array_from_aggregate(deleted_agg[bucket._id], 'size'); + const existing_count_hist = get_hist_array_from_aggregate(existing_agg[bucket._id], 'count'); + const deleted_count_hist = get_hist_array_from_aggregate(deleted_agg[bucket._id], 'count'); // size and count should have the same length, since they are emitted together in mongo mapreduce if (deleted_size_hist.length !== deleted_count_hist.length || @@ -529,11 +529,11 @@ function build_objects_hist(bucket, existing_agg, deleted_agg) { 'existing_count_hist.length =', existing_count_hist.length); } - let num_bins = Math.max(deleted_size_hist.length, existing_size_hist.length, current_objects_hist.length); + const num_bins = Math.max(deleted_size_hist.length, 
existing_size_hist.length, current_objects_hist.length); if (num_bins === 0) return current_objects_hist; - let new_size_hist = []; - for (var i = 0; i < num_bins; i++) { - let bin = { + const new_size_hist = []; + for (let i = 0; i < num_bins; i++) { + const bin = { label: (current_objects_hist[i] && current_objects_hist[i].label) || get_hist_label(i), aggregated_sum: get_new_bin( existing_size_hist[i] || 0, @@ -561,11 +561,11 @@ function get_new_bin(existing, deleted, current) { if (!existing && !deleted) { return current; } - let bigint_existing_size_bin = size_utils.json_to_bigint(existing); - let bigint_deleted_size_bin = size_utils.json_to_bigint(deleted); - let delta_size_bin = bigint_existing_size_bin + const bigint_existing_size_bin = size_utils.json_to_bigint(existing); + const bigint_deleted_size_bin = size_utils.json_to_bigint(deleted); + const delta_size_bin = bigint_existing_size_bin .minus(bigint_deleted_size_bin); - let new_bin = size_utils.json_to_bigint(current) + const new_bin = size_utils.json_to_bigint(current) .plus(delta_size_bin) .toJSON(); return new_bin; diff --git a/src/server/bg_services/replication_log_parser.js b/src/server/bg_services/replication_log_parser.js index a0e4308aa3..0ee2982cd2 100644 --- a/src/server/bg_services/replication_log_parser.js +++ b/src/server/bg_services/replication_log_parser.js @@ -170,7 +170,7 @@ function aws_parse_log_object(logs, log_object, ignore_fn) { for (const line of log_array) { if (line !== '') { - let log = parse_aws_log_entry(line); + const log = parse_aws_log_entry(line); if (log.operation) { // ignore the log entry if it should be ignored if (ignore_fn(log)) continue; diff --git a/src/server/bg_services/replication_scanner.js b/src/server/bg_services/replication_scanner.js index 6f83996fc5..f686e3c233 100644 --- a/src/server/bg_services/replication_scanner.js +++ b/src/server/bg_services/replication_scanner.js @@ -205,10 +205,10 @@ class ReplicationScanner { async get_keys_diff(src_keys, dst_keys, dst_next_cont_token, src_bucket_name, dst_bucket_name) { dbg.log1('replication_server.get_keys_diff: src contents', src_keys.map(c => c.Key), 'dst contents', dst_keys.map(c => c.Key)); - let to_replicate_map = {}; + const to_replicate_map = {}; const dst_map = _.keyBy(dst_keys, 'Key'); - for (let [i, src_content] of src_keys.entries()) { + for (const [i, src_content] of src_keys.entries()) { const cur_src_key = src_content.Key; dbg.log1('replication_server.get_keys_diff, src_key: ', i, cur_src_key); diff --git a/src/server/bg_workers.js b/src/server/bg_workers.js index 99595de719..c7a8853713 100644 --- a/src/server/bg_workers.js +++ b/src/server/bg_workers.js @@ -57,7 +57,7 @@ const MASTER_BG_WORKERS = [ ]; if (process.env.NOOBAA_LOG_LEVEL) { - let dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); + const dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); dbg_conf.core.map(module => dbg.set_module_level(dbg_conf.level, module)); } diff --git a/src/server/common_services/auth_server.js b/src/server/common_services/auth_server.js index 0be425d930..f463361e82 100644 --- a/src/server/common_services/auth_server.js +++ b/src/server/common_services/auth_server.js @@ -38,13 +38,13 @@ const s3_utils = require('../../endpoint/s3/s3_utils'); */ function create_auth(req) { - var email = req.rpc_params.email; - var password = req.rpc_params.password; - var system_name = req.rpc_params.system; - var role_name = req.rpc_params.role; - var authenticated_account; - var target_account; - var system; 
+ const email = req.rpc_params.email; + const password = req.rpc_params.password; + const system_name = req.rpc_params.system; + let role_name = req.rpc_params.role; + let authenticated_account; + let target_account; + let system; return P.resolve() .then(() => { @@ -88,7 +88,7 @@ function create_auth(req) { throw new RpcError('UNAUTHORIZED', 'credentials not found'); } - var account_arg = system_store.data.get_by_id(req.auth.account_id); + const account_arg = system_store.data.get_by_id(req.auth.account_id); target_account = target_account || account_arg; authenticated_account = authenticated_account || account_arg; @@ -112,7 +112,7 @@ function create_auth(req) { if (!system || system.deleted) throw new RpcError('UNAUTHORIZED', 'system not found'); // find the role of authenticated_account in the system - var roles = system.roles_by_account && + const roles = system.roles_by_account && system.roles_by_account[authenticated_account._id]; // now approve the role - @@ -171,7 +171,7 @@ async function create_k8s_auth(req) { // Currently I have no means to get the system name in the FE without an email and password. // So i default to the first (and currently only system) - let system = system_store.data.systems[0]; + const system = system_store.data.systems[0]; if (!system || system.deleted) { throw new RpcError('UNAUTHORIZED', 'system not found'); } @@ -316,15 +316,15 @@ function unauthorized_error(reason) { * */ function create_access_key_auth(req) { - var access_key = req.rpc_params.access_key.unwrap(); - var string_to_sign = req.rpc_params.string_to_sign; - var signature = req.rpc_params.signature; + const access_key = req.rpc_params.access_key.unwrap(); + const string_to_sign = req.rpc_params.string_to_sign; + const signature = req.rpc_params.signature; if (_.isUndefined(string_to_sign) || _.isUndefined(signature)) { throw new RpcError('UNAUTHORIZED', 'signature error'); } - var account = _.find(system_store.data.accounts, function(acc) { + const account = _.find(system_store.data.accounts, function(acc) { if (acc.access_keys) { return acc.access_keys[0].access_key.unwrap().toString() === access_key.toString(); } else { @@ -336,8 +336,8 @@ function create_access_key_auth(req) { throw new RpcError('UNAUTHORIZED', 'account not found'); } - let secret = account.access_keys[0].secret_key.unwrap().toString(); - let signature_test = signature_utils.get_signature_from_auth_token({ string_to_sign: string_to_sign }, secret); + const secret = account.access_keys[0].secret_key.unwrap().toString(); + const signature_test = signature_utils.get_signature_from_auth_token({ string_to_sign: string_to_sign }, secret); if (signature_test !== signature) { throw new RpcError('UNAUTHORIZED', 'signature error'); } @@ -349,7 +349,7 @@ function create_access_key_auth(req) { 'string_to_sign', string_to_sign, 'signature', signature); - var role = _.find(system_store.data.roles, function(r) { + const role = _.find(system_store.data.roles, function(r) { return r.account._id.toString() === account._id.toString(); }); @@ -357,13 +357,13 @@ function create_access_key_auth(req) { throw new RpcError('UNAUTHORIZED', 'role not found'); } - var system = role.system; + const system = role.system; if (!system) { throw new RpcError('UNAUTHORIZED', 'system not found'); } - var auth_extra; + let auth_extra; if (req.rpc_params.extra) { auth_extra = req.rpc_params.extra; auth_extra.signature = req.rpc_params.signature; @@ -375,7 +375,7 @@ function create_access_key_auth(req) { }; } - var token = make_auth_token({ + const token = 
make_auth_token({ system_id: system._id, account_id: account._id, role: 'admin', @@ -641,7 +641,7 @@ function _get_auth_info(account, system, authorized_by, role, extra) { response.account.is_support = true; } - let next_password_change = account.next_password_change; + const next_password_change = account.next_password_change; if (next_password_change && next_password_change < Date.now()) { response.account.must_change_password = true; } @@ -731,14 +731,14 @@ function has_bucket_anonymous_permission(bucket, action, bucket_path = "") { * @return token */ function make_auth_token(options) { - var auth = _.pick(options, 'account_id', 'system_id', 'role', 'extra', 'authorized_by'); + let auth = _.pick(options, 'account_id', 'system_id', 'role', 'extra', 'authorized_by'); auth.authorized_by = auth.authorized_by || 'noobaa'; // don't incude keys if value is falsy, to minimize the token size auth = _.omitBy(auth, value => !value); // set expiry if provided - var jwt_options = {}; + const jwt_options = {}; if (options.expiry) { jwt_options.expiresIn = options.expiry; } diff --git a/src/server/common_services/server_inter_process.js b/src/server/common_services/server_inter_process.js index b1ae15e390..7f3d90cdbe 100644 --- a/src/server/common_services/server_inter_process.js +++ b/src/server/common_services/server_inter_process.js @@ -6,10 +6,10 @@ */ 'use strict'; -var mongo_ctrl = require('../utils/mongo_ctrl'); -var P = require('../../util/promise'); -var dotenv = require('../../util/dotenv'); -var dbg = require('../../util/debug_module')(__filename); +const mongo_ctrl = require('../utils/mongo_ctrl'); +const P = require('../../util/promise'); +const dotenv = require('../../util/dotenv'); +const dbg = require('../../util/debug_module')(__filename); const system_store = require('../system_services/system_store').get_instance(); const server_rpc = require('../server_rpc'); @@ -24,7 +24,7 @@ async function load_system_store(req) { } function update_mongo_connection_string(req) { - let old_url = process.env.MONGO_RS_URL || ''; + const old_url = process.env.MONGO_RS_URL || ''; dotenv.load(); dbg.log0('Recieved update mongo string. will update mongo url from', old_url, ' to ', process.env.MONGO_RS_URL); return P.resolve(mongo_ctrl.update_connection_string()) @@ -41,8 +41,8 @@ function update_mongo_connection_string(req) { function update_master_change(req) { system_store.is_cluster_master = req.rpc_params.is_master; if (req.rpc_params.master_address) { - let new_master_address = req.rpc_params.master_address; - let old_master_address = server_rpc.rpc.router.master; + const new_master_address = req.rpc_params.master_address; + const old_master_address = server_rpc.rpc.router.master; // old_master_address is of the form ws://addr:port. check if new_master_address is differnet if (old_master_address.indexOf(new_master_address) === -1) { dbg.log0(`master changed from ${old_master_address} to ${new_master_address}. 
updating server_rpc`); diff --git a/src/server/func_services/func_server.js b/src/server/func_services/func_server.js index 6eb30135ac..fb230c7911 100644 --- a/src/server/func_services/func_server.js +++ b/src/server/func_services/func_server.js @@ -130,7 +130,7 @@ async function update_func(req) { await func_store.instance().update_func(func._id, config_updates); await _load_func(req); - let act = { + const act = { level: 'info', system: req.system._id, actor: req.account && req.account._id, diff --git a/src/server/index.js b/src/server/index.js index f770099533..9b958858cb 100644 --- a/src/server/index.js +++ b/src/server/index.js @@ -1,9 +1,9 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -let _ = require('lodash'); -let child_process = require('child_process'); -let argv = require('minimist')(process.argv); +const _ = require('lodash'); +const child_process = require('child_process'); +const argv = require('minimist')(process.argv); /** * @@ -38,7 +38,7 @@ if (require.main === module) { } function main() { - let excludes = argv.exclude ? argv.exclude.split(',') : []; + const excludes = argv.exclude ? argv.exclude.split(',') : []; console.log('Excluding services:', excludes.join(' ')); _.each(SERVICES, service => { if (excludes.indexOf(service.name) === -1) run_service(service); diff --git a/src/server/license_info.js b/src/server/license_info.js index d88ba0b504..4809449562 100644 --- a/src/server/license_info.js +++ b/src/server/license_info.js @@ -146,7 +146,7 @@ function format_csv(info) { ].map(x => `"${x || ''}"`).join(',') + '\n'); info.licenses.forEach(l => { if (l.name || l.url) { - var k = `${l.name}\0${l.version}\0${l.url}`; + const k = `${l.name}\0${l.version}\0${l.url}`; if (dups.has(k)) return; dups.set(k, l); } diff --git a/src/server/node_services/host_server.js b/src/server/node_services/host_server.js index 67f3eb136d..c981026c45 100644 --- a/src/server/node_services/host_server.js +++ b/src/server/node_services/host_server.js @@ -72,8 +72,8 @@ async function update_host_services(req) { function diagnose_host(req) { const { name } = req.rpc_params; const monitor = nodes_server.get_local_monitor(); - var out_path = `/public/host_${name.replace('#', '_')}_diagnostics.tgz`; - var inner_path = `${process.cwd()}/build${out_path}`; + const out_path = `/public/host_${name.replace('#', '_')}_diagnostics.tgz`; + const inner_path = `${process.cwd()}/build${out_path}`; return P.resolve() .then(() => diag.collect_server_diagnostics(req)) diff --git a/src/server/node_services/node_allocator.js b/src/server/node_services/node_allocator.js index 7bd43d9ad0..2750e2197f 100644 --- a/src/server/node_services/node_allocator.js +++ b/src/server/node_services/node_allocator.js @@ -38,11 +38,11 @@ const nodes_alloc_round_robin_symbol = Symbol('nodes_alloc_round_robin_symbol'); */ /** @type {{ [pool_id: string]: PoolAllocGroup }} */ -let alloc_group_by_pool = {}; +const alloc_group_by_pool = {}; /** @type {{ [pool_set: string]: PoolSetAllocGroup }} */ -let alloc_group_by_pool_set = {}; +const alloc_group_by_pool_set = {}; /** @type {{ [tiering_id: string]: TieringAllocGroup }} */ -let alloc_group_by_tiering = {}; +const alloc_group_by_tiering = {}; /** * @param {nb.System} system @@ -297,7 +297,7 @@ function _get_tier_pools_status(pools, required_valid_nodes) { const pools_status_by_id = {}; _.each(pools, pool => { let valid_for_allocation = true; - let alloc_group = alloc_group_by_pool[String(pool._id)]; + const alloc_group = alloc_group_by_pool[String(pool._id)]; const num_nodes = 
alloc_group ? _.sumBy(alloc_group.latency_groups, 'nodes.length') : 0; if (pool.cloud_pool_info) { if (num_nodes !== config.NODES_PER_CLOUD_POOL) { @@ -329,7 +329,7 @@ function _get_tier_pools_status(pools, required_valid_nodes) { * @returns {nb.NodeAPI} */ function allocate_node({ avoid_nodes, allocated_hosts, pools = [] }) { - let pool_set = _.map(pools, pool => String(pool._id)).sort().join(','); + const pool_set = _.map(pools, pool => String(pool._id)).sort().join(','); let alloc_group = alloc_group_by_pool_set[pool_set]; if (!alloc_group) { @@ -344,7 +344,7 @@ function allocate_node({ avoid_nodes, allocated_hosts, pools = [] }) { // Since we will merge the two groups we will eventually have two average groups // This is bad since we will have two groups with each having fast and slow drives pools.forEach(pool => { - let group = alloc_group_by_pool[pool._id.toHexString()]; + const group = alloc_group_by_pool[pool._id.toHexString()]; if (group && group.latency_groups) { group.latency_groups.forEach((value, index) => { if (pools_latency_groups[index]) { @@ -403,8 +403,8 @@ function allocate_node({ avoid_nodes, allocated_hosts, pools = [] }) { * @param {Object} options */ function allocate_from_list(nodes, avoid_nodes, allocated_hosts, options) { - for (var i = 0; i < nodes.length; ++i) { - var node = get_round_robin(nodes); + for (let i = 0; i < nodes.length; ++i) { + const node = get_round_robin(nodes); if (Boolean(options.use_nodes_with_errors) === Boolean(node[report_error_on_node_alloc_symbol]) && !_.includes(avoid_nodes, String(node._id)) && @@ -421,7 +421,7 @@ function allocate_from_list(nodes, avoid_nodes, allocated_hosts, options) { * @returns {nb.NodeAPI} */ function get_round_robin(nodes) { - var rr = (nodes[nodes_alloc_round_robin_symbol] || 0) % nodes.length; + const rr = (nodes[nodes_alloc_round_robin_symbol] || 0) % nodes.length; nodes[nodes_alloc_round_robin_symbol] = rr + 1; return nodes[rr]; } diff --git a/src/server/node_services/nodes_monitor.js b/src/server/node_services/nodes_monitor.js index 3d374041c1..e815d9ebef 100644 --- a/src/server/node_services/nodes_monitor.js +++ b/src/server/node_services/nodes_monitor.js @@ -218,7 +218,7 @@ class NodesMonitor extends EventEmitter { // initialize nodes stats in prometheus if (config.PROMETHEUS_ENABLED && system_store.data.systems[0]) { - let nodes_stats = await this._get_nodes_stats_by_service( + const nodes_stats = await this._get_nodes_stats_by_service( system_store.data.systems[0]._id, 0, Date.now(), @@ -330,7 +330,7 @@ class NodesMonitor extends EventEmitter { // new node heartbeat // create the node and then update the heartbeat if (!node_id && (req.role === 'create_node' || req.role === 'admin')) { - let agent_config = (extra.agent_config_id && system_store.data.get_by_id(extra.agent_config_id)) || {}; + const agent_config = (extra.agent_config_id && system_store.data.get_by_id(extra.agent_config_id)) || {}; this._add_new_node(req.connection, req.system._id, agent_config, req.rpc_params.pool_name); dbg.log0('connecting new node with agent_config =', { ...agent_config, @@ -695,7 +695,7 @@ class NodesMonitor extends EventEmitter { } _remove_node_to_hosts_map(host_id, item) { - let host_nodes = this._map_host_id.get(host_id); + const host_nodes = this._map_host_id.get(host_id); if (host_nodes) { _.pull(host_nodes, item); if (!host_nodes.length) { @@ -731,7 +731,7 @@ class NodesMonitor extends EventEmitter { } _get_nodes_by_host_id(host_id) { - let host_nodes = this._map_host_id.get(host_id); + const host_nodes = 
this._map_host_id.get(host_id); if (!host_nodes) { throw new RpcError('BAD_REQUEST', 'No such host ' + host_id); } @@ -1036,7 +1036,7 @@ class NodesMonitor extends EventEmitter { if (item.node.deleted) return; if (!item.connection) return; dbg.log1('_get_agent_info:', item.node.name); - let potential_masters = clustering_utils.get_potential_masters().map(addr => ({ + const potential_masters = clustering_utils.get_potential_masters().map(addr => ({ address: url.format({ protocol: 'wss', slashes: true, @@ -1141,7 +1141,7 @@ class NodesMonitor extends EventEmitter { updates.name = info.name; updates.host_id = info.host_id; this._map_node_name.delete(String(item.node.name)); - let base_name = updates.name || 'node'; + const base_name = updates.name || 'node'; let counter = 1; while (this._map_node_name.has(updates.name)) { updates.name = base_name + '-' + counter; @@ -1162,9 +1162,9 @@ class NodesMonitor extends EventEmitter { this._add_node_to_hosts_map(updates.host_id, item); item.added_host = true; } - let agent_config = system_store.data.get_by_id(item.node.agent_config) || {}; + const agent_config = system_store.data.get_by_id(item.node.agent_config) || {}; // on first call to get_agent_info enable\disable the node according to the configuration - let should_start_service = this._should_enable_agent(info, agent_config); + const should_start_service = this._should_enable_agent(info, agent_config); dbg.log1(`first call to get_agent_info. storage agent ${item.node.name}. should_start_service=${should_start_service}. `); if (!should_start_service) { item.node.decommissioned = Date.now(); @@ -1278,12 +1278,12 @@ class NodesMonitor extends EventEmitter { return; } dbg.log0('node does not have a valid create_node_token. creating new one and sending to agent'); - let auth_parmas = { + const auth_parmas = { system_id: String(item.node.system), account_id: system_store.data.get_by_id(item.node.system).owner._id, role: 'create_node', }; - let token = auth_server.make_auth_token(auth_parmas); + const token = auth_server.make_auth_token(auth_parmas); dbg.log0(`new create_node_token: ${token}`); await P.timeout(AGENT_RESPONSE_TIMEOUT, @@ -1789,7 +1789,7 @@ class NodesMonitor extends EventEmitter { } _should_enable_agent(info, agent_config) { - let { use_storage = true, exclude_drives = [] } = agent_config; + const { use_storage = true, exclude_drives = [] } = agent_config; if (info.node_type === 'BLOCK_STORE_FS') { if (!use_storage) return false; // if storage disable if configured to exclud storage if (info.storage.total < config.MINIMUM_AGENT_TOTAL_STORAGE) return false; // disable if not enough storage @@ -1800,7 +1800,7 @@ class NodesMonitor extends EventEmitter { _should_include_drives(mount, os_info, exclude_drives) { if (os_info.ostype.startsWith('Windows_NT')) { - let win_drives = exclude_drives.map(drv => { + const win_drives = exclude_drives.map(drv => { let ret = drv; if (drv.length === 1) { ret = drv + ':'; @@ -1845,7 +1845,7 @@ class NodesMonitor extends EventEmitter { if (item.node.issues_report) { // only print to log if the node had issues in the last hour - let last_issue = item.node.issues_report[item.node.issues_report.length - 1]; + const last_issue = item.node.issues_report[item.node.issues_report.length - 1]; if (now - last_issue.time < 60 * 60 * 1000) { dbg.log0('_update_status:', item.node.name, 'issues:', item.node.issues_report); } @@ -2488,7 +2488,7 @@ class NodesMonitor extends EventEmitter { host_item.untrusted_reasons = _.map( _.filter(host_nodes, item => !item.trusted), 
untrusted_item => { - let reason = { + const reason = { events: [], drive: untrusted_item.node.drives[0] }; @@ -2525,7 +2525,7 @@ class NodesMonitor extends EventEmitter { host_item.avg_disk_write = _.mean(host_nodes.map(item => item.avg_disk_write)); - let host_aggragate = this._aggregate_nodes_list(host_nodes); + const host_aggragate = this._aggregate_nodes_list(host_nodes); host_item.node.storage = host_aggragate.storage; host_item.storage_nodes.data_activities = host_aggragate.data_activities; host_item.node.drives = _.flatMap(host_nodes, item => item.node.drives); @@ -2949,7 +2949,7 @@ class NodesMonitor extends EventEmitter { error_read_bytes: 0, error_write_bytes: 0, }; - let storage = { + const storage = { total: 0, free: 0, used: 0, @@ -3028,7 +3028,7 @@ class NodesMonitor extends EventEmitter { const by_mode = {}; const storage_by_mode = {}; const by_service = { STORAGE: 0 }; - let storage = { + const storage = { total: 0, free: 0, used: 0, @@ -3130,7 +3130,7 @@ class NodesMonitor extends EventEmitter { _get_host_info(host_item, adminfo) { - let info = { + const info = { storage_nodes_info: { nodes: host_item.storage_nodes .filter(item => Boolean(item.node_from_store)) @@ -3684,7 +3684,7 @@ function progress_by_time(time, now) { } function is_localhost(address) { - let addr_url = url.parse(address); + const addr_url = url.parse(address); return net_utils.is_localhost(addr_url.hostname); } diff --git a/src/server/notifications/alerts_log_store.js b/src/server/notifications/alerts_log_store.js index 90c82b858e..77d7aa915e 100644 --- a/src/server/notifications/alerts_log_store.js +++ b/src/server/notifications/alerts_log_store.js @@ -44,8 +44,8 @@ class AlertsLogStore { get_unread_alerts_count(sysid) { return P.resolve().then(async () => { - let severities = ['CRIT', 'MAJOR', 'INFO']; - let unread_alerts = {}; + const severities = ['CRIT', 'MAJOR', 'INFO']; + const unread_alerts = {}; await Promise.all(severities.map(async sev => { const count = await this._alertslogs.countDocuments({ system: sysid, @@ -60,7 +60,7 @@ class AlertsLogStore { async update_alerts_state(sysid, query, state) { const selector = this._create_selector(sysid, query); - let update = { + const update = { $set: { read: state } @@ -94,7 +94,7 @@ class AlertsLogStore { let _id; if (ids) { - let obj_ids = ids.map(id => new mongodb.ObjectID(id)); + const obj_ids = ids.map(id => new mongodb.ObjectID(id)); _id = { $in: obj_ids }; } else if (till) { _id = { $lt: new mongodb.ObjectID(till) }; diff --git a/src/server/notifications/dispatcher.js b/src/server/notifications/dispatcher.js index d3527e432f..a9f62dc64f 100644 --- a/src/server/notifications/dispatcher.js +++ b/src/server/notifications/dispatcher.js @@ -51,7 +51,7 @@ class Dispatcher { //Activity Log activity(item) { - var self = this; + const self = this; item.desc = new SensitiveString(item.desc); dbg.log0('Adding ActivityLog entry', item); item.time = item.time || new Date(); @@ -60,7 +60,7 @@ class Dispatcher { if (!config.SEND_EVENTS_REMOTESYS) { return P.resolve(); } - var l = { + const l = { id: String(item._id), level: item.level, event: item.event, @@ -71,9 +71,9 @@ class Dispatcher { } read_activity_log(req) { - var self = this; + const self = this; - let query = _.pick(req.rpc_params, ['till', 'since', 'skip', 'limit']); + const query = _.pick(req.rpc_params, ['till', 'since', 'skip', 'limit']); if (req.rpc_params.event) { query.event = new RegExp(req.rpc_params.event); } @@ -81,7 +81,7 @@ class Dispatcher { return 
ActivityLogStore.instance().read_activity_log(query) .then(logs => P.map(logs, function(log_item) { - var l = { + const l = { id: String(log_item._id), level: log_item.level, event: log_item.event, diff --git a/src/server/notifications/event_server.js b/src/server/notifications/event_server.js index 9f37a321ae..64d034f7cc 100644 --- a/src/server/notifications/event_server.js +++ b/src/server/notifications/event_server.js @@ -35,13 +35,13 @@ function export_activity_log(req) { req.rpc_params.limit = req.rpc_params.limit || 100000; return Dispatcher.instance().read_activity_log(req) .then(logs => { - let out_lines = logs.logs.reduce( + const out_lines = logs.logs.reduce( (lines, entry) => { - let time = (new Date(entry.time)).toISOString(); - let entity_type = entry.event.split('.')[0]; - let account = entry.actor ? entry.actor.email : ''; - let entity = entry[entity_type]; - let description = entry.desc ? entry.desc.join(' ') : ''; + const time = (new Date(entry.time)).toISOString(); + const entity_type = entry.event.split('.')[0]; + const account = entry.actor ? entry.actor.email : ''; + const entity = entry[entity_type]; + const description = entry.desc ? entry.desc.join(' ') : ''; let entity_name = ''; if (entity) { entity_name = entity_type === 'obj' ? entity.key : entity.name; diff --git a/src/server/object_services/md_store.js b/src/server/object_services/md_store.js index d39cfae270..d2e91cc7b1 100644 --- a/src/server/object_services/md_store.js +++ b/src/server/object_services/md_store.js @@ -76,7 +76,7 @@ class MDStore { const hex_time = Math.floor(time / 1000).toString(16); assert(hex_time.length <= 8); const padded_hex_time = '0'.repeat(8 - hex_time.length) + hex_time; - var suffix; + let suffix; if (zero_suffix) { suffix = '0'.repeat(16); } else { diff --git a/src/server/object_services/object_server.js b/src/server/object_services/object_server.js index 3a997d3e9e..c60e0a0224 100644 --- a/src/server/object_services/object_server.js +++ b/src/server/object_services/object_server.js @@ -62,7 +62,7 @@ async function create_object_upload(req) { const encryption = _get_encryption_for_object(req); const obj_id = MDStore.instance().make_md_id(); - var info = { + const info = { _id: obj_id, system: req.system._id, bucket: req.bucket._id, @@ -204,7 +204,7 @@ function calc_retention(req) { const retention_conf = get_default_lock_config(req.bucket); if (!retention_conf) return; - let today = new Date(); + const today = new Date(); const retain_until_date = retention_conf.days ? 
new Date(today.setDate(today.getDate() + retention_conf.days)) : new Date(today.setFullYear(today.getFullYear() + retention_conf.years)); @@ -1373,7 +1373,7 @@ function report_endpoint_problems(req) { * @returns {nb.ObjectInfo} */ function get_object_info(md, options = {}) { - var bucket = system_store.data.get_by_id(md.bucket); + const bucket = system_store.data.get_by_id(md.bucket); return { obj_id: md._id.toHexString(), bucket: bucket.name, @@ -1409,7 +1409,7 @@ function get_object_info(md, options = {}) { } function load_bucket(req, { include_deleting } = {}) { - var bucket = req.system.buckets_by_name && req.system.buckets_by_name[req.rpc_params.bucket.unwrap()]; + const bucket = req.system.buckets_by_name && req.system.buckets_by_name[req.rpc_params.bucket.unwrap()]; if (!bucket || (bucket.deleting && !include_deleting)) { throw new RpcError('NO_SUCH_BUCKET', 'No such bucket: ' + req.rpc_params.bucket); } diff --git a/src/server/server_rpc.js b/src/server/server_rpc.js index c3be43b862..4ea30e8558 100644 --- a/src/server/server_rpc.js +++ b/src/server/server_rpc.js @@ -24,8 +24,8 @@ class ServerRpc { } get_server_options() { - let system_store = require('./system_services/system_store').get_instance(); - let auth_server = require('./common_services/auth_server'); + const system_store = require('./system_services/system_store').get_instance(); + const auth_server = require('./common_services/auth_server'); return { middleware: [ // refresh the system_store on request arrival @@ -68,9 +68,9 @@ class ServerRpc { } register_system_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = this.get_server_options(); rpc.register_service(schema.account_api, require('./system_services/account_server'), options); rpc.register_service(schema.system_api, @@ -103,9 +103,9 @@ class ServerRpc { } register_node_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = this.get_server_options(); rpc.register_service(schema.node_api, require('./node_services/node_server'), options); rpc.register_service(schema.host_api, @@ -113,25 +113,25 @@ class ServerRpc { } register_object_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = this.get_server_options(); rpc.register_service(schema.object_api, require('./object_services/object_server'), options); } register_func_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = this.get_server_options(); rpc.register_service(schema.func_api, require('./func_services/func_server'), options); } register_bg_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = this.get_server_options(); rpc.register_service(schema.scrubber_api, require('./bg_services/scrubber'), options); rpc.register_service(schema.replication_api, @@ -139,17 +139,17 @@ class ServerRpc { } register_hosted_agents_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = 
this.get_server_options(); rpc.register_service(schema.hosted_agents_api, require('../hosted_agents/hosted_agents'), options); } register_common_services() { - let rpc = this.rpc; - let schema = rpc.schema; - let options = this.get_server_options(); + const rpc = this.rpc; + const schema = rpc.schema; + const options = this.get_server_options(); rpc.register_service(schema.auth_api, require('./common_services/auth_server'), options); rpc.register_service(schema.debug_api, diff --git a/src/server/system_services/account_server.js b/src/server/system_services/account_server.js index 71d450e993..1dbbcd3933 100644 --- a/src/server/system_services/account_server.js +++ b/src/server/system_services/account_server.js @@ -72,7 +72,7 @@ async function create_account(req) { account.next_password_change = new Date(); } - let sys_id = req.rpc_params.new_system_parameters ? + const sys_id = req.rpc_params.new_system_parameters ? system_store.parse_system_store_id(req.rpc_params.new_system_parameters.new_system_id) : req.system._id; @@ -154,13 +154,13 @@ async function create_account(req) { } }); - var created_account = system_store.data.get_by_id(account._id); - var auth = { + const created_account = system_store.data.get_by_id(account._id); + const auth = { account_id: created_account._id }; // since we created the first system for this account // we expect just one system, but use _.each to get it from the map - var current_system = (req.system && req.system._id) || sys_id; + const current_system = (req.system && req.system._id) || sys_id; _.each(created_account.roles_by_system, (sys_roles, system_id) => { //we cannot assume only one system. if (current_system.toString() === system_id) { @@ -225,9 +225,9 @@ function create_external_user_account(req) { * */ function read_account(req) { - let email = req.rpc_params.email || req.account.email; + const email = req.rpc_params.email || req.account.email; - let account = system_store.get_account_by_email(email); + const account = system_store.get_account_by_email(email); if (!account) { throw new RpcError('NO_SUCH_ACCOUNT', 'No such account email: ' + email); } @@ -305,7 +305,7 @@ async function generate_account_keys(req) { * */ function update_account_s3_access(req) { - let account = _.cloneDeep(system_store.get_account_by_email(req.rpc_params.email)); + const account = _.cloneDeep(system_store.get_account_by_email(req.rpc_params.email)); if (!account) { throw new RpcError('NO_SUCH_ACCOUNT', 'No such account email: ' + req.rpc_params.email); } @@ -377,9 +377,9 @@ function update_account_s3_access(req) { const pool = system.pools_by_name[req.rpc_params.default_resource] || (system.namespace_resources_by_name && system.namespace_resources_by_name[req.rpc_params.default_resource]); const original_pool = pool && pool.name; - let desc_string = []; - let added_buckets = []; - let removed_buckets = []; + const desc_string = []; + const added_buckets = []; + const removed_buckets = []; desc_string.push(`${account.email.unwrap()} S3 access was updated by ${req.account && req.account.email.unwrap()}`); if (req.rpc_params.s3_access) { if (original_pool !== req.rpc_params.default_resource) { @@ -442,7 +442,7 @@ function update_account(req) { if (params.ips && !_.every(params.ips, ip_range => (net.isIP(ip_range.start) && net.isIP(ip_range.end)))) { throw new RpcError('FORBIDDEN', 'Non valid IPs'); } - let updates = { + const updates = { name: params.name, email: params.new_email, next_password_change: params.must_change_password === true ? 
new Date() : undefined, @@ -455,7 +455,7 @@ function update_account(req) { updates.role_config = req.rpc_params.role_config; } - let removals = { + const removals = { next_password_change: params.must_change_password === false ? true : undefined, allowed_ips: params.ips === null ? true : undefined }; @@ -503,7 +503,7 @@ async function reset_password(req) { if (!is_authorized_account) { throw new RpcError('UNAUTHORIZED', 'Invalid verification password'); } - let account = system_store.data.accounts_by_email[req.rpc_params.email.unwrap()]; + const account = system_store.data.accounts_by_email[req.rpc_params.email.unwrap()]; if (!account) { throw new RpcError('NO_SUCH_ACCOUNT', 'No such account email: ' + req.rpc_params.email); } @@ -572,10 +572,10 @@ async function get_account_usage(req) { * */ function delete_account(req) { - let account_to_delete = system_store.get_account_by_email(req.rpc_params.email); + const account_to_delete = system_store.get_account_by_email(req.rpc_params.email); _verify_can_delete_account(req, account_to_delete); - let roles_to_delete = system_store.data.roles + const roles_to_delete = system_store.data.roles .filter( role => String(role.account._id) === String(account_to_delete._id) ) @@ -623,7 +623,7 @@ function delete_account(req) { */ function delete_account_by_property(req) { let roles_to_delete = []; - let accounts_to_delete = system_store.get_accounts_by_nsfs_account_config(req.rpc_params.nsfs_account_config) + const accounts_to_delete = system_store.get_accounts_by_nsfs_account_config(req.rpc_params.nsfs_account_config) .map(account_to_delete => { _verify_can_delete_account(req, account_to_delete); roles_to_delete = roles_to_delete.concat(system_store.data.roles @@ -682,7 +682,7 @@ function list_accounts(req) { * */ function accounts_status(req) { - var any_non_support_account = _.find(system_store.data.accounts, function(account) { + const any_non_support_account = _.find(system_store.data.accounts, function(account) { return !account.is_support; }); return { @@ -711,7 +711,7 @@ async function add_external_connection(req) { throw new RpcError(res.error.code, res.error.message); } - var info = _.pick(req.rpc_params, 'name', 'endpoint', 'endpoint_type', 'aws_sts_arn'); + let info = _.pick(req.rpc_params, 'name', 'endpoint', 'endpoint_type', 'aws_sts_arn'); if (!info.endpoint_type) info.endpoint_type = 'AWS'; info.access_key = req.rpc_params.identity; info.secret_key = system_store.master_key_manager.encrypt_sensitive_string_with_master_key_id( @@ -1094,10 +1094,10 @@ function check_net_storage_connection(params) { } function delete_external_connection(req) { - var params = _.pick(req.rpc_params, 'connection_name'); - let account = req.account; + const params = _.pick(req.rpc_params, 'connection_name'); + const account = req.account; - let connection_to_delete = cloud_utils.find_cloud_connection(account, params.connection_name); + const connection_to_delete = cloud_utils.find_cloud_connection(account, params.connection_name); if (_.find(system_store.data.pools, pool => ( pool.cloud_pool_info && @@ -1135,7 +1135,7 @@ function delete_external_connection(req) { // UTILS ////////////////////////////////////////////////////////// function get_account_info(account, include_connection_cache) { - let info = _.pick(account, + const info = _.pick(account, 'name', 'email', 'is_external', @@ -1169,7 +1169,7 @@ function get_account_info(account, include_connection_cache) { info.nsfs_account_config = account.nsfs_account_config; info.systems = 
_.compact(_.map(account.roles_by_system, function(roles, system_id) { - var system = system_store.data.get_by_id(system_id); + const system = system_store.data.get_by_id(system_id); if (!system) { return null; } @@ -1215,7 +1215,7 @@ function get_account_info(account, include_connection_cache) { function ensure_support_account() { return system_store.refresh() .then(function() { - var existing_support_account = _.find(system_store.data.accounts, function(account) { + const existing_support_account = _.find(system_store.data.accounts, function(account) { return Boolean(account.is_support); }); if (existing_support_account) { @@ -1225,7 +1225,7 @@ function ensure_support_account() { console.log('CREATING SUPPORT ACCOUNT...'); return bcrypt_password(system_store.get_server_secret()) .then(password => { - let support_account = { + const support_account = { _id: system_store.new_system_store_id(), name: new SensitiveString('Support'), email: new SensitiveString('support@noobaa.com'), @@ -1350,7 +1350,7 @@ async function verify_authorized_account(req) { function _list_connection_usage(account, credentials) { - let cloud_pool_usage = _.map( + const cloud_pool_usage = _.map( _.filter(system_store.data.pools, pool => ( pool.cloud_pool_info && !pool.cloud_pool_info.pending_delete && @@ -1363,7 +1363,7 @@ function _list_connection_usage(account, credentials) { entity: pool.name, external_entity: pool.cloud_pool_info.target_bucket })); - let namespace_resource_usage = _.map( + const namespace_resource_usage = _.map( _.filter(system_store.data.namespace_resources, ns => ( ns.connection && ns.connection.endpoint_type === credentials.endpoint_type && diff --git a/src/server/system_services/aws_usage_metering.js b/src/server/system_services/aws_usage_metering.js index 1046b5b0a5..c9432358ce 100644 --- a/src/server/system_services/aws_usage_metering.js +++ b/src/server/system_services/aws_usage_metering.js @@ -40,7 +40,7 @@ function background_worker() { .divide(size_utils.TERABYTE) .toJSON(); - var params = { + const params = { DryRun: false, ProductCode: process.env.AWS_PRODUCT_CODE, Timestamp: new Date(), diff --git a/src/server/system_services/bucket_server.js b/src/server/system_services/bucket_server.js index 5f84f45b2d..28fae22b98 100644 --- a/src/server/system_services/bucket_server.js +++ b/src/server/system_services/bucket_server.js @@ -49,7 +49,7 @@ const qm_regex = /\?/g; const ar_regex = /\*/g; function new_bucket_defaults(name, system_id, tiering_policy_id, owner_account_id, tag, lock_enabled) { - let now = Date.now(); + const now = Date.now(); return { _id: system_store.new_system_store_id(), name: name, @@ -157,7 +157,7 @@ async function create_bucket(req) { validate_non_nsfs_bucket_creation(req); validate_nsfs_bucket(req); - let bucket = new_bucket_defaults(req.rpc_params.name, req.system._id, + const bucket = new_bucket_defaults(req.rpc_params.name, req.system._id, tiering_policy && tiering_policy._id, req.account._id, req.rpc_params.tag, req.rpc_params.lock_enabled); const bucket_m_key = system_store.master_key_manager.new_master_key({ @@ -236,7 +236,7 @@ async function create_bucket(req) { dbg.error('create_bucket: get_partial_stats failed with', error); } } - let created_bucket = find_bucket(req); + const created_bucket = find_bucket(req); return get_bucket_info({ bucket: created_bucket }); }); } @@ -505,7 +505,7 @@ async function read_bucket(req) { async function read_bucket_sdk_info(req) { const bucket = find_bucket(req); - var pools = []; + let pools = []; _.forEach(bucket.tiering 
&& bucket.tiering.tiers, tier_and_order => { _.forEach(tier_and_order.tier.mirrors, mirror_object => { @@ -513,7 +513,7 @@ async function read_bucket_sdk_info(req) { }); }); pools = _.compact(pools); - let pool_names = pools.map(pool => pool.name); + const pool_names = pools.map(pool => pool.name); const system = req.system; @@ -581,10 +581,10 @@ function get_bucket_changes(req, update_request, bucket, tiering_policy) { events: [], alerts: [] }; - let quota = update_request.quota; + const quota = update_request.quota; // const spillover_sent = !_.isUndefined(update_request.spillover); - let single_bucket_update = { + const single_bucket_update = { _id: bucket._id }; changes.updates.buckets = [single_bucket_update]; @@ -760,7 +760,7 @@ function check_for_lambda_permission_issue(req, bucket, removed_accounts) { async function delete_bucket_and_objects(req) { - var bucket = find_bucket(req); + const bucket = find_bucket(req); const now = new Date(); // mark the bucket as deleting. it will be excluded from system_store indexes @@ -803,9 +803,9 @@ async function delete_bucket_and_objects(req) { async function delete_bucket(req) { return bucket_semaphore.surround_key(String(req.rpc_params.name), async () => { req.load_auth(); - var bucket = find_bucket(req); + const bucket = find_bucket(req); // TODO before deleting tier and tiering_policy need to check they are not in use - let tiering_policy = bucket.tiering; + const tiering_policy = bucket.tiering; const reason = await can_delete_bucket(bucket); if (reason) { throw new RpcError(reason, 'Cannot delete bucket'); @@ -894,7 +894,7 @@ async function delete_bucket_lifecycle(req) { * */ async function list_buckets(req) { - var buckets_by_name = _.filter( + const buckets_by_name = _.filter( req.system.buckets_by_name, bucket => req.has_s3_bucket_permission(bucket, "s3:listbucket") && !bucket.deleting ); @@ -1041,7 +1041,7 @@ function get_bucket_lifecycle_configuration_rules(req) { function get_cloud_buckets(req) { dbg.log0('get cloud buckets', req.rpc_params); return P.fcall(async function() { - var connection = cloud_utils.find_cloud_connection( + const connection = cloud_utils.find_cloud_connection( req.account, req.rpc_params.connection ); @@ -1062,7 +1062,7 @@ function get_cloud_buckets(req) { return result; }())); } else if (connection.endpoint_type === 'NET_STORAGE') { - let used_cloud_buckets = cloud_utils.get_used_cloud_targets(['NET_STORAGE'], + const used_cloud_buckets = cloud_utils.get_used_cloud_targets(['NET_STORAGE'], system_store.data.buckets, system_store.data.pools, system_store.data.namespace_resources); const ns = new NetStorage({ @@ -1085,7 +1085,7 @@ function get_cloud_buckets(req) { return buckets.map(bucket => _inject_usage_to_cloud_bucket(bucket.name, connection.endpoint, used_cloud_buckets)); }); } else if (connection.endpoint_type === 'GOOGLE') { - let used_cloud_buckets = cloud_utils.get_used_cloud_targets(['GOOGLE'], + const used_cloud_buckets = cloud_utils.get_used_cloud_targets(['GOOGLE'], system_store.data.buckets, system_store.data.pools, system_store.data.namespace_resources); let key_file; try { @@ -1102,8 +1102,8 @@ function get_cloud_buckets(req) { .then(data => data[0].map(bucket => _inject_usage_to_cloud_bucket(bucket.name, connection.endpoint, used_cloud_buckets))); } else { // else if AWS(s3-compatible/aws/sts-aws)/Flashblade/IBM_COS - var access_key; - var secret_key; + let access_key; + let secret_key; if (connection.aws_sts_arn) { const creds = await cloud_utils.generate_aws_sts_creds(connection, 
"get_cloud_buckets_session"); access_key = creds.accessKeyId; @@ -1113,7 +1113,7 @@ function get_cloud_buckets(req) { access_key = connection.access_key.unwrap(); secret_key = connection.secret_key.unwrap(); } - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: connection.endpoint, accessKeyId: access_key, secretAccessKey: secret_key, @@ -1181,7 +1181,7 @@ async function add_bucket_lambda_trigger(req) { async function delete_bucket_lambda_trigger(req) { dbg.log0('delete bucket lambda trigger', req.rpc_params); const trigger_id = req.rpc_params.id; - var bucket = find_bucket(req, req.rpc_params.bucket_name); + const bucket = find_bucket(req, req.rpc_params.bucket_name); const trigger = bucket.lambda_triggers.find(trig => trig._id.toString() === trigger_id); if (!trigger) { throw new RpcError('NO_SUCH_TRIGGER', 'This trigger does not exists: ' + trigger_id); @@ -1205,7 +1205,7 @@ async function update_bucket_lambda_trigger(req) { const updates = _.pick(req.rpc_params, 'event_name', 'func_name', 'func_version', 'enabled', 'object_prefix', 'object_suffix', 'attempts'); if (_.isEmpty(updates)) return; updates.func_version = updates.func_version || '$LATEST'; - var bucket = find_bucket(req, req.rpc_params.bucket_name); + const bucket = find_bucket(req, req.rpc_params.bucket_name); const trigger = _.find(bucket.lambda_triggers, trig => trig._id.toString() === req.rpc_params.id); if (!trigger) { throw new RpcError('NO_SUCH_TRIGGER', 'This trigger does not exists: ' + req.rpc_params._id); @@ -1318,10 +1318,10 @@ function validate_trigger_update(bucket, validated_trigger) { } function _inject_usage_to_cloud_bucket(target_name, endpoint, usage_list) { - let res = { + const res = { name: target_name }; - let using_target = usage_list.find(candidate_target => (target_name === candidate_target.target_name && + const using_target = usage_list.find(candidate_target => (target_name === candidate_target.target_name && endpoint === candidate_target.endpoint)); if (using_target) { res.used_by = { @@ -1333,7 +1333,7 @@ function _inject_usage_to_cloud_bucket(target_name, endpoint, usage_list) { } function find_bucket(req, bucket_name = req.rpc_params.name) { - var bucket = req.system.buckets_by_name && req.system.buckets_by_name[bucket_name.unwrap()]; + const bucket = req.system.buckets_by_name && req.system.buckets_by_name[bucket_name.unwrap()]; if (!bucket) { dbg.error('BUCKET NOT FOUND', bucket_name); throw new RpcError('NO_SUCH_BUCKET', 'No such bucket: ' + bucket_name); @@ -1495,7 +1495,7 @@ function _calc_metrics({ let has_any_pool_configured = false; let has_enough_healthy_nodes_for_tiering = false; let has_enough_total_nodes_for_tiering = false; - let any_rebuilds = false; + const any_rebuilds = false; const internal_pool = pool_server.get_internal_mongo_pool(bucket.system); const objects_aggregate = { @@ -1565,13 +1565,13 @@ function _calc_metrics({ const actual_free = size_utils.json_to_bigint(_.get(info, 'tiering.data.free') || 0); const quota = new Quota(bucket.quota); - let available_size_for_upload = quota.get_available_size_for_upload(actual_free, objects_aggregate.size); - let available_quantity_for_upload = quota.get_available_quantity_for_upload(objects_aggregate.count); + const available_size_for_upload = quota.get_available_size_for_upload(actual_free, objects_aggregate.size); + const available_quantity_for_upload = quota.get_available_quantity_for_upload(objects_aggregate.count); if (bucket_free.isZero()) { is_no_storage = true; } else { - let free_percent = 
bucket_free.multiply(100).divide(bucket_total); + const free_percent = bucket_free.multiply(100).divide(bucket_total); if (free_percent < 30) { is_storage_low = true; } @@ -1707,7 +1707,7 @@ function calc_quota_status(metrics) { } function resolve_tiering_policy(req, policy_name) { - var tiering_policy = req.system.tiering_policies_by_name[policy_name.unwrap()]; + const tiering_policy = req.system.tiering_policies_by_name[policy_name.unwrap()]; if (!tiering_policy) { dbg.error('TIER POLICY NOT FOUND', policy_name); throw new RpcError('INVALID_BUCKET_STATE', 'Bucket tiering policy not found'); @@ -1802,7 +1802,7 @@ async function get_bucket_replication(req) { const replication = await replication_store.instance().get_replication_by_id(replication_id); const bucket_names_replication = _.map(replication, rule => { - let named_rule = { ...rule, destination_bucket: system_store.data.get_by_id(rule.destination_bucket).name }; + const named_rule = { ...rule, destination_bucket: system_store.data.get_by_id(rule.destination_bucket).name }; delete named_rule.rule_status; return named_rule; }); @@ -1839,8 +1839,8 @@ function validate_replication(req) { if (replication_rules.length > config.BUCKET_REPLICATION_MAX_RULES || replication_rules.length < 1) throw new RpcError('INVALID_REPLICATION_POLICY', 'Number of rules is invalid'); - let rule_ids = []; - let pref_by_dst_bucket = {}; + const rule_ids = []; + const pref_by_dst_bucket = {}; for (const rule of replication_rules) { const { destination_bucket, filter, rule_id } = rule; @@ -1885,7 +1885,7 @@ function normalize_replication(req) { }); - let { log_replication_info } = req.rpc_params.replication_policy; + const { log_replication_info } = req.rpc_params.replication_policy; if (log_replication_info && !log_replication_info.logs_location.logs_bucket) { const logs_bucket = find_bucket(req); const logs_bucket_info = get_bucket_info({ bucket: logs_bucket }); diff --git a/src/server/system_services/cluster_server.js b/src/server/system_services/cluster_server.js index d32be42147..05fba9ba31 100644 --- a/src/server/system_services/cluster_server.js +++ b/src/server/system_services/cluster_server.js @@ -134,13 +134,13 @@ function pre_add_member_to_cluster(req) { my_address = response.caller_address || os_utils.get_local_ipv4_ips()[0]; if (!is_clusterized && response.caller_address) { dbg.log0('updating adding server ip in db'); - let shard_idx = cutil.find_shard_index(req.rpc_params.shard); - let server_idx = _.findIndex(topology.shards[shard_idx].servers, + const shard_idx = cutil.find_shard_index(req.rpc_params.shard); + const server_idx = _.findIndex(topology.shards[shard_idx].servers, server => net_utils.is_localhost(server.address)); if (server_idx === -1) { dbg.warn("db does not contain internal ip of master server"); } else { - let new_shards = topology.shards; + const new_shards = topology.shards; new_shards[shard_idx].servers[server_idx] = { address: response.caller_address }; @@ -265,7 +265,7 @@ function verify_join_conditions(req) { return P.resolve() .then(() => os_utils.os_info()) .then(os_info => { - let hostname = os_info.hostname; + const hostname = os_info.hostname; let caller_address; if (req.connection && req.connection.url) { caller_address = req.connection.url.hostname.includes('ffff') ? 
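A reviewer note on the pattern driving these hunks: prefer-const flags only bindings that are never reassigned. Mutating the object a const refers to is still allowed, which is why jwt_options in make_auth_token becomes const even though expiresIn is set on it afterwards, while auth stays let because _.omitBy assigns a new object to the same name. A minimal sketch of the distinction, with made-up values; only the two variable names are taken from this patch:

'use strict';

// const fixes the binding, not the value: property writes still work.
const jwt_options = {};
jwt_options.expiresIn = '1h'; // fine, the binding itself is untouched

// A variable that is rebound must remain let:
let auth = { role: 'admin', extra: undefined };
// drop falsy values by assigning a brand new object to the same name
auth = Object.fromEntries(Object.entries(auth).filter(([, value]) => Boolean(value)));

// Rebinding a const is a runtime TypeError:
// jwt_options = {}; // TypeError: Assignment to constant variable.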
@@ -428,14 +428,14 @@ function join_to_cluster(req) { dbg.log0('server new role is', req.rpc_params.role); if (req.rpc_params.role === 'SHARD') { //Server is joining as a new shard, update the shard topology - let shards = cutil.get_topology().shards; + const shards = cutil.get_topology().shards; shards.push({ shardname: req.rpc_params.shard, servers: [{ address: req.rpc_params.ip }] }); - let cluster_info = { + const cluster_info = { owner_address: req.rpc_params.ip, shards: shards }; @@ -463,7 +463,7 @@ function join_to_cluster(req) { //.then(() => _attach_server_configuration({})) //.then((res_params) => _update_cluster_info(res_params)) .then(function() { - var topology_to_send = _.omit(cutil.get_topology(), 'dns_servers', 'timezone'); + const topology_to_send = _.omit(cutil.get_topology(), 'dns_servers', 'timezone'); dbg.log0('Added member, publishing updated topology', cutil.pretty_topology(topology_to_send)); //Mongo servers are up, update entire cluster with the new topology return _publish_to_cluster('news_updated_topology', { @@ -559,13 +559,13 @@ function update_member_of_cluster(req) { if (shard_index === -1 || server_idx === -1) { throw new Error(`could not find address:${req.rpc_params.secret} in any shard`); } - let new_shard = topology.shards[shard_index]; + const new_shard = topology.shards[shard_index]; old_address = new_shard.servers[server_idx].address; new_shard.servers[server_idx] = { address: req.rpc_params.new_address }; - let new_rs_params = { + const new_rs_params = { name: new_shard.shardname, IPs: cutil.extract_servers_ip( new_shard.servers @@ -581,7 +581,7 @@ function update_member_of_cluster(req) { .then(() => _update_cluster_info(topology)) .then(() => { topology = cutil.get_topology(); - var topology_to_send = _.omit(topology, 'dns_servers', 'timezone'); + const topology_to_send = _.omit(topology, 'dns_servers', 'timezone'); dbg.log0('Added member, publishing updated topology', cutil.pretty_topology(topology_to_send)); // Mongo servers are up, update entire cluster with the new topology // Notice that we send additional parameters which will be used for the changed server @@ -669,13 +669,13 @@ function news_updated_topology(req) { function redirect_to_cluster_master(req) { - let current_clustering = system_store.get_local_cluster_info(); + const current_clustering = system_store.get_local_cluster_info(); if (!current_clustering) { - let address = system_store.data.systems[0].base_address || os_utils.get_local_ipv4_ips()[0]; + const address = system_store.data.systems[0].base_address || os_utils.get_local_ipv4_ips()[0]; return address; } if (!current_clustering.is_clusterized) { - let address = system_store.data.systems[0].base_address || current_clustering.owner_address; + const address = system_store.data.systems[0].base_address || current_clustering.owner_address; return address; } return P.fcall(function() { @@ -683,12 +683,12 @@ function redirect_to_cluster_master(req) { }) .catch(err => { dbg.log0('redirect_to_cluster_master caught error', err); - let topology = cutil.get_topology(); + const topology = cutil.get_topology(); let res_host; if (topology && topology.shards) { _.forEach(topology.shards, shard => { if (String(shard.shardname) === String(topology.owner_shardname)) { - let hosts_excluding_current = _.difference(shard.servers, [{ + const hosts_excluding_current = _.difference(shard.servers, [{ address: topology.owner_address }]); if (hosts_excluding_current.length > 0) { @@ -708,12 +708,12 @@ function redirect_to_cluster_master(req) { function 
set_debug_level(req) { dbg.log0('Recieved set_debug_level req', req.rpc_params); - var debug_params = req.rpc_params; - var target_servers = []; + const debug_params = req.rpc_params; + const target_servers = []; let audit_activity = {}; return P.fcall(function() { if (debug_params.target_secret) { - let cluster_server = system_store.data.cluster_by_server[debug_params.target_secret]; + const cluster_server = system_store.data.cluster_by_server[debug_params.target_secret]; if (!cluster_server) { throw new RpcError('CLUSTER_SERVER_NOT_FOUND', `Server with secret key: ${debug_params.target_secret} was not found` @@ -758,7 +758,7 @@ function set_debug_level(req) { function apply_set_debug_level(req) { dbg.log0('Recieved apply_set_debug_level req', req.rpc_params); if (req.rpc_params.target_secret) { - let cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; + const cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; if (!cluster_server) { throw new RpcError('CLUSTER_SERVER_NOT_FOUND', `Server with secret key: ${req.rpc_params.target_secret} was not found`); } @@ -813,11 +813,11 @@ function _set_debug_level_internal(req, level) { })) .then(() => MongoCtrl.set_debug_level(level ? 5 : 0)) .then(() => { - var update_object = {}; - var debug_mode = level > 0 ? Date.now() : undefined; + const update_object = {}; + const debug_mode = level > 0 ? Date.now() : undefined; if (req.rpc_params.target_secret) { - let cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; + const cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; if (!cluster_server) { throw new RpcError('CLUSTER_SERVER_NOT_FOUND', `Server with secret key: ${req.rpc_params.target_secret} was not found`); @@ -865,14 +865,14 @@ function _set_debug_level_internal(req, level) { function diagnose_system(req) { - var target_servers = []; + const target_servers = []; const TMP_WORK_DIR = `/tmp/cluster_diag`; const INNER_PATH = `${process.cwd()}/build`; const OUT_PATH = '/public/' + req.system.name + '_cluster_diagnostics.tgz'; const WORKING_PATH = `${INNER_PATH}${OUT_PATH}`; if (req.rpc_params.target_secret) { - let cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; + const cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; if (!cluster_server) { throw new RpcError('CLUSTER_SERVER_NOT_FOUND', `Server with secret key: ${req.rpc_params.target_secret} was not found` @@ -927,8 +927,8 @@ function collect_server_diagnostics(req) { .then(() => os_utils.os_info()) .then(os_info => { dbg.log0('Recieved diag req'); - var out_path = '/public/' + os_info.hostname + '_srv_diagnostics.tgz'; - var inner_path = process.cwd() + '/build' + out_path; + const out_path = '/public/' + os_info.hostname + '_srv_diagnostics.tgz'; + const inner_path = process.cwd() + '/build' + out_path; return P.resolve() .then(() => diag.collect_server_diagnostics(req)) .then(() => diag.pack_diagnostics(inner_path)) @@ -963,7 +963,7 @@ function collect_server_diagnostics(req) { function read_server_time(req) { - let cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; + const cluster_server = system_store.data.cluster_by_server[req.rpc_params.target_secret]; if (!cluster_server) { throw new RpcError('CLUSTER_SERVER_NOT_FOUND', `Server with secret key: ${req.rpc_params.target_secret} was not found`); @@ -985,8 +985,8 @@ function apply_read_server_time(req) { // function 
read_server_config(req) { - let using_dhcp = false; - let srvconf = {}; + const using_dhcp = false; + const srvconf = {}; return P.resolve() .then(() => _attach_server_configuration(srvconf)) @@ -1011,7 +1011,7 @@ function update_server_conf(req) { } let audit_desc = ``; - let audit_server = {}; + const audit_server = {}; return P.resolve() .then(() => { audit_server.hostname = _.get(cluster_server, 'heartbeat.health.os_info.hostname'); @@ -1123,7 +1123,7 @@ function _verify_join_preconditons(req) { } }) .then(() => { - let system = system_store.data.systems[0]; + const system = system_store.data.systems[0]; if (system) { //Verify we are not already joined to a cluster //TODO:: think how do we want to handle it, if at all @@ -1157,8 +1157,8 @@ function _verify_join_preconditons(req) { function _add_new_shard_on_server(shardname, ip, params) { // "cache" current topology until all changes take affect, since we are about to lose mongo // until the process is done - let current_topology = cutil.get_topology(); - var config_updates = {}; + const current_topology = cutil.get_topology(); + const config_updates = {}; dbg.log0('Adding shard, new topology', cutil.pretty_topology(current_topology)); //Actually add a new mongo shard instance @@ -1212,9 +1212,8 @@ function _add_new_shard_on_server(shardname, ip, params) { function _initiate_replica_set(shardname) { dbg.log0('Adding first RS server to', shardname); - var new_topology = cutil.get_topology(); - var shard_idx; - shard_idx = cutil.find_shard_index(shardname); + const new_topology = cutil.get_topology(); + const shard_idx = cutil.find_shard_index(shardname); //No Such shard if (shard_idx === -1) { dbg.log0('Cannot add RS member to non-existing shard'); @@ -1242,7 +1241,7 @@ function _update_cluster_info(params) { }) .then(new_clustering => { current_clustering = current_clustering || new_clustering; - var update = _.defaults(_.pick(params, _.keys(current_clustering)), current_clustering); + const update = _.defaults(_.pick(params, _.keys(current_clustering)), current_clustering); update.owner_secret = system_store.get_server_secret(); //Keep original owner_secret update.owner_address = params.owner_address || current_clustering.owner_address; update._id = current_clustering._id; @@ -1281,8 +1280,8 @@ function _add_new_server_to_replica_set(params) { const shardname = params.shardname; const ip = params.ip; dbg.log0('Adding RS server to', shardname); - var new_topology = cutil.get_topology(); - var shard_idx = cutil.find_shard_index(shardname); + const new_topology = cutil.get_topology(); + const shard_idx = cutil.find_shard_index(shardname); //No Such shard if (shard_idx === -1) { @@ -1322,7 +1321,7 @@ function _add_new_server_to_replica_set(params) { }) .then(() => { dbg.log0('Adding new replica set member to the set'); - let new_rs_params = { + const new_rs_params = { name: shardname, IPs: cutil.extract_servers_ip( cutil.get_topology().shards[shard_idx].servers @@ -1355,7 +1354,7 @@ function _add_new_config_on_server(cfg_array, params) { } function _publish_to_cluster(apiname, req_params) { - var servers = []; + const servers = []; _.each(cutil.get_topology().shards, function(shard) { _.each(shard.servers, function(single_srv) { servers.push(single_srv.address); @@ -1372,7 +1371,7 @@ function _publish_to_cluster(apiname, req_params) { } function _update_rs_if_needed(IPs, name, is_config) { - var config_changes = cutil.rs_array_changes(IPs, name, is_config); + const config_changes = cutil.rs_array_changes(IPs, name, is_config); if 
(config_changes) { return P.resolve() .then(() => { @@ -1442,7 +1441,7 @@ async function _attach_server_configuration(cluster_server) { } function check_cluster_status() { - var servers = system_store.data.clusters; + const servers = system_store.data.clusters; dbg.log2('check_cluster_status', servers); const other_servers = _.filter(servers, server => server.owner_secret !== system_store.get_server_secret()); diff --git a/src/server/system_services/objects/quota.js b/src/server/system_services/objects/quota.js index 8876ee5a13..87a5c17bc8 100644 --- a/src/server/system_services/objects/quota.js +++ b/src/server/system_services/objects/quota.js @@ -150,11 +150,11 @@ class Quota { * @returns - is quota exceeded */ is_quota_exceeded(bucket) { - var exceeded = { + const exceeded = { is_quota_exceeded: false, is_quota_low: false }; - let {size_used_percent, quantity_used_percent} = this.get_bucket_quota_usages_percent(bucket); + const {size_used_percent, quantity_used_percent} = this.get_bucket_quota_usages_percent(bucket); exceeded.is_quota_exceeded = (size_used_percent >= 100 || quantity_used_percent >= 100); exceeded.is_quota_low = !exceeded.is_quota_exceeded && (size_used_percent >= config.QUOTA_LOW_THRESHOLD || quantity_used_percent >= config.QUOTA_LOW_THRESHOLD); @@ -170,7 +170,7 @@ class Quota { get_available_size_for_upload(total_size, used) { let available_size_for_upload = total_size; if (this.get_quota_by_size() > 0) { - let quota_size_free = size_utils.json_to_bigint(this.get_quota_by_size()) + const quota_size_free = size_utils.json_to_bigint(this.get_quota_by_size()) .minus(size_utils.json_to_bigint(used)); available_size_for_upload = size_utils.size_min([ size_utils.bigint_to_json(quota_size_free), @@ -188,7 +188,7 @@ class Quota { get_available_quantity_for_upload(used) { let available_quantity_for_upload = config.QUOTA_MAX_OBJECTS; if (this.get_quota_by_quantity() > 0) { - let quota_quantity_free = size_utils.json_to_bigint(this.get_quota_by_quantity()) + const quota_quantity_free = size_utils.json_to_bigint(this.get_quota_by_quantity()) .minus(size_utils.json_to_bigint(used)); available_quantity_for_upload = size_utils.bigint_to_json(quota_quantity_free); } diff --git a/src/server/system_services/pool_server.js b/src/server/system_services/pool_server.js index 9144c2a409..f2602eb265 100644 --- a/src/server/system_services/pool_server.js +++ b/src/server/system_services/pool_server.js @@ -84,7 +84,7 @@ function set_pool_controller_factory(pool_controller_factory) { } function new_pool_defaults(name, system_id, resource_type, pool_node_type) { - let now = Date.now(); + const now = Date.now(); return { _id: system_store.new_system_store_id(), system: system_id, @@ -317,12 +317,12 @@ async function create_namespace_resource(req) { } async function create_cloud_pool(req) { - var name = req.rpc_params.name; - var connection = cloud_utils.find_cloud_connection(req.account, req.rpc_params.connection); + const name = req.rpc_params.name; + const connection = cloud_utils.find_cloud_connection(req.account, req.rpc_params.connection); const secret_key = system_store.master_key_manager.encrypt_sensitive_string_with_master_key_id( connection.secret_key, req.account.master_key_id._id); - var cloud_info = _.omitBy({ + const cloud_info = _.omitBy({ endpoint: connection.endpoint, target_bucket: req.rpc_params.target_bucket, auth_method: connection.auth_method, @@ -382,7 +382,7 @@ async function create_cloud_pool(req) { }; const pool_node_type = map_pool_type[connection.endpoint_type]; - var 
pool = new_pool_defaults(name, req.system._id, 'CLOUD', pool_node_type); + const pool = new_pool_defaults(name, req.system._id, 'CLOUD', pool_node_type); dbg.log0('Creating new cloud_pool', pool); pool.cloud_pool_info = cloud_info; @@ -465,8 +465,8 @@ async function update_cloud_pool(req) { } function create_mongo_pool(req) { - var name = req.rpc_params.name; - var mongo_info = {}; + const name = req.rpc_params.name; + const mongo_info = {}; if (config.DB_TYPE === 'postgres') { dbg.error('Cannot create mongo pool on PostgreSQL'); @@ -478,7 +478,7 @@ function create_mongo_pool(req) { throw new Error('System already has mongo pool'); } - var pool = new_pool_defaults(name, req.system._id, 'INTERNAL', 'BLOCK_STORE_MONGO'); + const pool = new_pool_defaults(name, req.system._id, 'INTERNAL', 'BLOCK_STORE_MONGO'); dbg.log0('Creating new mongo_pool', pool); pool.mongo_pool_info = mongo_info; @@ -505,7 +505,7 @@ function create_mongo_pool(req) { } async function read_pool(req) { - var pool = find_pool_by_name(req); + const pool = find_pool_by_name(req); const nodes_aggregate_pool = await nodes_client.instance().aggregate_nodes_by_pool([pool.name], req.system._id); const hosts_aggregate_pool = await nodes_client.instance().aggregate_hosts_by_pool([pool.name], req.system._id); return get_pool_info(pool, nodes_aggregate_pool, hosts_aggregate_pool); @@ -698,7 +698,7 @@ async function delete_hosts_pool(req, pool) { }); const related_funcs = await func_store.instance().list_funcs_by_pool(req.system._id, pool._id); for (const func of related_funcs) { - let new_pools_arr = func.pools.filter(function(obj) { + const new_pools_arr = func.pools.filter(function(obj) { return obj.toString() !== (pool._id).toString(); }); await func_store.instance().update_func(func._id, { 'pools': new_pools_arr }); @@ -717,7 +717,7 @@ async function get_current_hosts_count(pool_name, system_id) { function delete_resource_pool(req, pool) { dbg.log0('Deleting resource pool', pool.name); - var pool_name = pool.name; + const pool_name = pool.name; return P.resolve() .then(() => { const reason = check_resource_pool_deletion(pool); @@ -748,7 +748,7 @@ function delete_resource_pool(req, pool) { .then(function() { // rename the deleted pool to avoid an edge case where there are collisions // with a new resource pool name - let db_update = { + const db_update = { _id: pool._id, name: pool.name + '#' + pool._id }; @@ -783,7 +783,7 @@ function delete_resource_pool(req, pool) { } function get_associated_buckets(req) { - var pool = find_pool_by_name(req); + const pool = find_pool_by_name(req); return get_associated_buckets_int(pool); } @@ -859,7 +859,7 @@ async function get_cloud_services_stats(req) { } function get_pool_history(req) { - let pool_list = req.rpc_params.pool_list; + const pool_list = req.rpc_params.pool_list; return HistoryDataStore.instance().get_pool_history() .then(history_records => history_records.map(history_record => ({ timestamp: history_record.time_stamp.getTime(), @@ -888,7 +888,7 @@ function get_pool_history(req) { // TODO: Notice that does not include pools in disabled tiers // What should we do in that case? Shall we delete the pool or not? 
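The helper below filters pool.system.buckets_by_name, a name-to-bucket map, directly with _.filter: lodash treats plain objects as collections and iterates their values, returning an array. A small self-contained sketch with hypothetical buckets:

'use strict';
const _ = require('lodash');

// buckets_by_name maps bucket name -> bucket record, as on the system object.
const buckets_by_name = {
    photos: { name: 'photos', deleting: false },
    trash: { name: 'trash', deleting: true },
};

// _.filter iterates the object's values and returns an array of matches,
// mirroring the deleting-bucket guard used in the hunk below.
const live_buckets = _.filter(buckets_by_name, bucket => !bucket.deleting);
console.log(live_buckets.map(bucket => bucket.name)); // [ 'photos' ]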
function get_associated_buckets_int(pool) { - var associated_buckets = _.filter(pool.system.buckets_by_name, function(bucket) { + const associated_buckets = _.filter(pool.system.buckets_by_name, function(bucket) { if (bucket.deleting) return false; return _.find(bucket.tiering.tiers, function(tier_and_order) { return _.find(tier_and_order.tier.mirrors, function(mirror) { @@ -928,8 +928,8 @@ function get_associated_accounts(pool) { } function find_pool_by_name(req) { - var name = req.rpc_params.name; - var pool = req.system.pools_by_name[name]; + const name = req.rpc_params.name; + const pool = req.system.pools_by_name[name]; if (!pool) { throw new RpcError('NO_SUCH_POOL', 'No such pool: ' + name); } @@ -994,7 +994,7 @@ function find_namespace_resource_by_name(req) { function get_pool_info(pool, nodes_aggregate_pool, hosts_aggregate_pool) { const p_nodes = _.get(nodes_aggregate_pool, ['groups', String(pool._id)], {}); const p_hosts = _.get(hosts_aggregate_pool, ['groups', String(pool._id)], { nodes: {} }); - var info = { + const info = { name: pool.name, resource_type: pool.resource_type, pool_node_type: pool.pool_node_type, @@ -1252,7 +1252,7 @@ function check_pool_deletion(pool) { } //Verify pool is not defined as default for any account - var accounts = get_associated_accounts(pool); + const accounts = get_associated_accounts(pool); if (accounts.length) { return 'DEFAULT_RESOURCE'; } @@ -1284,7 +1284,7 @@ function check_resource_pool_deletion(pool) { } //Verify pool is not defined as default for any account - var accounts = get_associated_accounts(pool); + const accounts = get_associated_accounts(pool); if (accounts.length) { return 'DEFAULT_RESOURCE'; } diff --git a/src/server/system_services/stats_aggregator.js b/src/server/system_services/stats_aggregator.js index edfa26b146..d00f57ee3b 100644 --- a/src/server/system_services/stats_aggregator.js +++ b/src/server/system_services/stats_aggregator.js @@ -156,20 +156,20 @@ const PARTIAL_SINGLE_SYS_DEFAULTS = { //Aggregate bucket configuration and policies function _aggregate_buckets_config(system) { - let bucket_config = []; + const bucket_config = []; const sorted_1k_buckets = system.buckets .sort((bucket_a, bucket_b) => bucket_b.num_objects.value - bucket_a.num_objects.value) .slice(0, 1000); for (const cbucket of sorted_1k_buckets) { - let current_config = {}; + const current_config = {}; current_config.num_objects = cbucket.num_objects.value; current_config.versioning = cbucket.versioning; current_config.quota = Boolean(cbucket.quota); current_config.tiers = []; if (cbucket.tiering) { for (const ctier of cbucket.tiering.tiers) { - let current_tier = _.find(system.tiers, t => ctier.tier === t.name); + const current_tier = _.find(system.tiers, t => ctier.tier === t.name); if (current_tier) { current_config.tiers.push({ placement_type: current_tier.data_placement, @@ -189,12 +189,12 @@ function _aggregate_buckets_config(system) { //Collect systems related stats and usage async function get_systems_stats(req) { - var sys_stats = _.cloneDeep(SYSTEM_STATS_DEFAULTS); + const sys_stats = _.cloneDeep(SYSTEM_STATS_DEFAULTS); sys_stats.agent_version = process.env.AGENT_VERSION || 'Unknown'; sys_stats.count = system_store.data.systems.length; sys_stats.os_release = (await fs.promises.readFile('/etc/redhat-release').catch(fs_utils.ignore_enoent) || 'unkonwn').toString(); sys_stats.platform = process.env.PLATFORM; - var cluster = system_store.data.clusters[0]; + const cluster = system_store.data.clusters[0]; if (cluster && cluster.cluster_id) { 
sys_stats.clusterid = cluster.cluster_id; } @@ -300,7 +300,7 @@ async function get_partial_providers_stats(req) { if (bucket.deleting) continue; const { pools, objects_size } = bucket.storage_stats; const types_mapped = new Map(); - for (let [key, value] of Object.entries(pools)) { + for (const [key, value] of Object.entries(pools)) { const pool = system_store.data.pools.find(pool_rec => String(pool_rec._id) === String(key)); // TODO: Handle deleted pools if (!pool) continue; @@ -546,7 +546,7 @@ async function _partial_buckets_info(req) { } -var NODES_STATS_DEFAULTS = { +const NODES_STATS_DEFAULTS = { count: 0, hosts_count: 0, os: {}, @@ -588,8 +588,8 @@ const NAMESPACE_RESOURCE_STATS_DEFAULTS = { //Collect nodes related stats and usage function get_nodes_stats(req) { - var nodes_stats = _.cloneDeep(NODES_STATS_DEFAULTS); - var nodes_histo = get_empty_nodes_histo(); + const nodes_stats = _.cloneDeep(NODES_STATS_DEFAULTS); + const nodes_histo = get_empty_nodes_histo(); //Per each system fill out the needed info const system = system_store.data.systems[0]; const support_account = _.find(system_store.data.accounts, account => account.is_support); @@ -651,7 +651,7 @@ function get_ops_stats(req) { } function get_bucket_sizes_stats(req) { - let ret = []; + const ret = []; for (const b of system_store.data.buckets) { if (b.deleting) continue; if (b.storage_stats.objects_hist && @@ -816,7 +816,7 @@ function get_tier_stats(req) { } async function get_all_stats(req) { - var stats_payload = { + const stats_payload = { systems_stats: null, nodes_stats: null, cloud_pool_stats: null, @@ -992,14 +992,14 @@ function partial_cycle_parse_prometheus_metrics(payload) { const { unhealthy_namespace_resource_count, namespace_resource_count, namespace_resources } = namespace_resource_stats; const { logical_size, physical_size } = savings; - let percentage_of_unhealthy_buckets = unhealthy_buckets / buckets_num; + const percentage_of_unhealthy_buckets = unhealthy_buckets / buckets_num; // 0 - Everything is fine (BE wise not means that there is no NooBaa CR errors, default status for working system) // 1 - All resources are unhealthy // 2 - Object bucket has an issue // 3 - Many buckets have issues // 4 - Some buckets have issues - let health_status = (unhealthy_pool_count === pool_count && 1) || + const health_status = (unhealthy_pool_count === pool_count && 1) || (unhealthy_buckets === 1 && 2) || (percentage_of_unhealthy_buckets > 0.5 && 4) || (percentage_of_unhealthy_buckets > 0.3 && 3) || 0; @@ -1086,7 +1086,7 @@ function add_sample_point(opname, duration) { } async function object_usage_scrubber(req) { - let new_req = req; + const new_req = req; new_req.rpc_params.till_time = req.system.last_stats_report; await object_server.reset_s3_ops_counters(new_req); new_req.rpc_params.last_stats_report = Date.now(); @@ -1096,7 +1096,7 @@ async function object_usage_scrubber(req) { function get_empty_nodes_histo() { //TODO: Add histogram for limit, once implemented - var empty_nodes_histo = {}; + const empty_nodes_histo = {}; empty_nodes_histo.histo_allocation = new Histogram('AllocationSizes(GB)', [{ label: 'low', start_val: 0 @@ -1156,8 +1156,8 @@ function get_empty_nodes_histo() { } function _handle_payload() { - let system = system_store.data.systems[0]; - let support_account = _.find(system_store.data.accounts, account => account.is_support); + const system = system_store.data.systems[0]; + const support_account = _.find(system_store.data.accounts, account => account.is_support); return 
server_rpc.client.stats.object_usage_scrubber({}, { auth_token: auth_server.make_auth_token({ system_id: system._id, diff --git a/src/server/system_services/system_server.js b/src/server/system_services/system_server.js index aa0574fda1..993e18ca9e 100644 --- a/src/server/system_services/system_server.js +++ b/src/server/system_services/system_server.js @@ -111,9 +111,9 @@ function _initialize_debug_level(system) { .then(() => { // The purpose of this code is to initialize the debug level // on server's startup, to synchronize the db with the actual value - let current_clustering = system_store.get_local_cluster_info(); + const current_clustering = system_store.get_local_cluster_info(); if (current_clustering) { - var update_object = {}; + const update_object = {}; update_object.clusters = [{ _id: current_clustering._id, debug_level: 0 @@ -126,7 +126,7 @@ function _initialize_debug_level(system) { } function new_system_defaults(name, owner_account_id) { - var system = { + const system = { _id: system_store.new_system_store_id(), name: name, owner: owner_account_id, @@ -184,7 +184,7 @@ function new_system_changes(name, owner_account_id) { const bucket_with_suffix = default_bucket_name + '#' + Date.now().toString(36); - let system = new_system_defaults(name, owner_account_id); + const system = new_system_defaults(name, owner_account_id); const m_key = system_store.master_key_manager.new_master_key({ description: `master key of ${system._id} system`, @@ -233,7 +233,7 @@ function new_system_changes(name, owner_account_id) { }] ); - let bucket = bucket_server.new_bucket_defaults( + const bucket = bucket_server.new_bucket_defaults( default_bucket_name, system._id, policy._id, @@ -546,12 +546,12 @@ async function read_system(req) { maintenance_mode.time_left = Math.max(0, system.maintenance_mode - now); } - let phone_home_config = {}; + const phone_home_config = {}; if (system.freemium_cap.phone_home_unable_comm) { phone_home_config.phone_home_unable_comm = true; } - let system_cap = system.freemium_cap.cap_terabytes ? system.freemium_cap.cap_terabytes : Number.MAX_SAFE_INTEGER; + const system_cap = system.freemium_cap.cap_terabytes ? system.freemium_cap.cap_terabytes : Number.MAX_SAFE_INTEGER; // TODO use n2n_config.stun_servers ? 
// var stun_address = 'stun://' + ip_address + ':' + stun.PORT; @@ -562,7 +562,7 @@ async function read_system(req) { // dbg.log0('read_system: n2n_config.stun_servers', n2n_config.stun_servers); // } - let last_upgrade = system.upgrade_history.successful_upgrades[0] && { + const last_upgrade = system.upgrade_history.successful_upgrades[0] && { timestamp: system.upgrade_history.successful_upgrades[0].timestamp }; @@ -576,7 +576,7 @@ async function read_system(req) { name: system.name, objects: objects_sys.count.toJSNumber(), roles: _.map(system.roles_by_account, function(roles, account_id) { - var account = system_store.data.get_by_id(account_id); + const account = system_store.data.get_by_id(account_id); if (!account) return; return { roles: roles, @@ -588,7 +588,7 @@ async function read_system(req) { const tiering_pools_status = node_allocator.get_tiering_status(bucket.tiering); Object.assign(tiering_status_by_tier, tiering_pools_status); const func_configs = funcs.map(func => func.config); - let b = bucket_server.get_bucket_info({ + const b = bucket_server.get_bucket_info({ bucket, nodes_aggregate_pool: nodes_aggregate_pool_with_cloud_and_mongo, hosts_aggregate_pool, @@ -650,7 +650,7 @@ async function read_system(req) { } function update_system(req) { - var updates = _.pick(req.rpc_params, 'name'); + const updates = _.pick(req.rpc_params, 'name'); updates._id = req.system._id; return system_store.make_changes({ update: { @@ -660,7 +660,7 @@ function update_system(req) { } function set_maintenance_mode(req) { - var updates = {}; + const updates = {}; let audit_desc = ''; const send_event = req.rpc_params.duration ? 'dbg.maintenance_mode' : 'dbg.maintenance_mode_stopped'; @@ -743,7 +743,7 @@ function list_systems(req) { */ function list_systems_int(account, get_ids) { // support gets to see all systems - var roles; + let roles; if (account) { roles = _.filter(system_store.data.roles, function(role) { return String(role.account._id) === String(account._id); @@ -765,7 +765,7 @@ function list_systems_int(account, get_ids) { * */ function add_role(req) { - var account = find_account_by_email(req); + const account = find_account_by_email(req); return system_store.make_changes({ insert: { roles: [{ @@ -786,13 +786,13 @@ function add_role(req) { * */ function remove_role(req) { - var account = find_account_by_email(req); - var roles = _.filter(system_store.data.roles, + const account = find_account_by_email(req); + const roles = _.filter(system_store.data.roles, role => String(role.system._id) === String(req.system._id) && String(role.account._id) === String(account._id) && role.role === req.rpc_params.role); if (!roles.length) return; - var roles_ids = _.map(roles, '_id'); + const roles_ids = _.map(roles, '_id'); return system_store.make_changes({ remove: { roles: roles_ids @@ -801,7 +801,7 @@ function remove_role(req) { } async function set_last_stats_report_time(req) { - var updates = {}; + const updates = {}; updates._id = req.system._id; updates.last_stats_report = req.rpc_params.last_stats_report; await system_store.make_changes({ @@ -1203,7 +1203,7 @@ function get_system_info(system, get_id) { } function find_account_by_email(req) { - var account = system_store.get_account_by_email(req.rpc_params.email); + const account = system_store.get_account_by_email(req.rpc_params.email); if (!account) { throw new RpcError('NO_SUCH_ACCOUNT', 'No such account email: ' + req.rpc_params.email); } @@ -1501,8 +1501,8 @@ async function upsert_master_key(params) { // find and set the account's pools and 
namespace resources and // set the new secrets by the new sync creds function get_pools_and_ns_resources_changes(sync_creds, account_id) { - let pools_updates = []; - let ns_resources_updates = []; + const pools_updates = []; + const ns_resources_updates = []; _.map(sync_creds, creds => { const pool_update = system_store.data.pools @@ -1548,11 +1548,11 @@ function get_entity_info(entity, entity_type) { } async function upgrade_master_keys() { - let master_keys = []; - let buckets_updates = []; - let accounts_updates = []; - let pools_updates = []; - let namespace_resources_updates = []; + const master_keys = []; + const buckets_updates = []; + const accounts_updates = []; + const pools_updates = []; + const namespace_resources_updates = []; let system_master_key = system_store.data.systems[0].master_key_id; // upgrade system master key if it doesn't exist if (!system_master_key) { diff --git a/src/server/system_services/system_store.js b/src/server/system_services/system_store.js index 5f66c3108d..59770412fb 100644 --- a/src/server/system_services/system_store.js +++ b/src/server/system_services/system_store.js @@ -150,7 +150,7 @@ const COLLECTIONS = [{ const COLLECTIONS_BY_NAME = _.keyBy(COLLECTIONS, 'name'); -let accounts_by_email_lowercase = []; +const accounts_by_email_lowercase = []; /** @@ -248,10 +248,10 @@ class SystemStoreData { rebuild_idmap() { this.idmap = {}; _.each(COLLECTIONS, col => { - let items = this[col.name]; + const items = this[col.name]; _.each(items, item => { - let idstr = String(item._id); - let existing = this.idmap[idstr]; + const idstr = String(item._id); + const existing = this.idmap[idstr]; if (existing) { dbg.error('SystemStoreData: id collision', item, existing); } else { @@ -263,7 +263,7 @@ class SystemStoreData { rebuild_object_links() { _.each(COLLECTIONS, col => { - let items = this[col.name]; + const items = this[col.name]; _.each(items, item => this.resolve_object_ids_recursive(item)); }); } @@ -279,9 +279,9 @@ class SystemStoreData { return; } const key = field.valueOf(); - let val = index.val ? _.get(item, index.val) : item; - let context = index.context ? _.get(item, index.context) : this; - let map = context[index.name] || {}; + const val = index.val ? _.get(item, index.val) : item; + const context = index.context ? _.get(item, index.context) : this; + const map = context[index.name] || {}; context[index.name] = map; if (index.val_array) { map[key] = map[key] || []; @@ -306,12 +306,12 @@ class SystemStoreData { check_indexes(col, item) { _.each(col.mem_indexes, index => { - let key = _.get(item, index.key || '_id'); - let context = index.context ? _.get(item, index.context) : this; + const key = _.get(item, index.key || '_id'); + const context = index.context ? 
_.get(item, index.context) : this; if (!context) return; - let map = context[index.name]; + const map = context[index.name]; if (!index.val_array) { - let existing = map && map[key]; + const existing = map && map[key]; if (existing && String(existing._id) !== String(item._id)) { throw new RpcError('CONFLICT', index.name + ' collision on key ' + key); } @@ -384,7 +384,7 @@ class SystemStore extends EventEmitter { if (this.data) { load_time = this.data.time; } - let since_load = Date.now() - load_time; + const since_load = Date.now() - load_time; if (since_load < this.START_REFRESH_THRESHOLD) { return this.data; } else if (since_load < this.FORCE_REFRESH_THRESHOLD) { @@ -412,7 +412,7 @@ class SystemStore extends EventEmitter { } this.master_key_manager.load_root_key(); - let new_data = new SystemStoreData(); + const new_data = new SystemStoreData(); let millistamp = time_utils.millistamp(); await this._register_for_changes(); await this._read_new_data_from_db(new_data); @@ -477,7 +477,7 @@ class SystemStore extends EventEmitter { } async _read_data_from_db(target) { - let non_deleted_query = { + const non_deleted_query = { deleted: null }; await db_client.instance().connect(); @@ -492,7 +492,7 @@ class SystemStore extends EventEmitter { async _read_new_data_from_db(target) { const now = Date.now(); - let newly_updated_query = { + const newly_updated_query = { last_update: { $gte: this.last_update_time, } @@ -654,13 +654,13 @@ class SystemStore extends EventEmitter { const col = get_collection(name); _.each(list, item => { data.check_indexes(col, item); - let dont_change_last_update = Boolean(item.dont_change_last_update); + const dont_change_last_update = Boolean(item.dont_change_last_update); let updates = _.omit(item, '_id', '$find', 'dont_change_last_update'); - let find_id = _.pick(item, '_id'); - let finds = item.$find || (db_client.instance().is_object_id(find_id._id) && find_id); + const find_id = _.pick(item, '_id'); + const finds = item.$find || (db_client.instance().is_object_id(find_id._id) && find_id); if (_.isEmpty(updates)) return; if (!finds) throw new Error(`SystemStore: make_changes id is not of type object_id: ${find_id._id}`); - let keys = _.keys(updates); + const keys = _.keys(updates); if (_.first(keys)[0] === '$') { for (const key of keys) { @@ -763,7 +763,7 @@ class SystemStore extends EventEmitter { }); if (!this.bg_timeout) { this.bg_timeout = setTimeout(() => { - let bg_changes = this.bg_changes; + const bg_changes = this.bg_changes; this.bg_changes = null; this.bg_timeout = null; this.make_changes(bg_changes); @@ -776,7 +776,7 @@ class SystemStore extends EventEmitter { * @returns {object} */ get_local_cluster_info(get_hb) { - let owner_secret = this.get_server_secret(); + const owner_secret = this.get_server_secret(); let reply; _.each(this.data && this.data.clusters, function(cluster_info) { if (cluster_info.owner_secret === owner_secret) { diff --git a/src/server/system_services/tier_server.js b/src/server/system_services/tier_server.js index 517280332c..7a36a4d8e1 100644 --- a/src/server/system_services/tier_server.js +++ b/src/server/system_services/tier_server.js @@ -107,15 +107,15 @@ function read_tier(req) { nodes_client.instance().aggregate_data_free_by_tier([String(tier._id)], req.system._id) ]) .then(function(res) { - let nodes_aggregate_pool = res[0]; - let available_to_upload = res[1]; + const nodes_aggregate_pool = res[0]; + const available_to_upload = res[1]; return get_tier_info(tier, nodes_aggregate_pool, { mirror_storage: 
available_to_upload[String(tier._id)] }); }); } function _convert_pools_to_data_placement_structure(pool_ids, data_placement) { - let mirrors = []; + const mirrors = []; if (data_placement === 'MIRROR') { _.forEach(pool_ids, pool_id => mirrors.push({ _id: system_store.new_system_store_id(), @@ -200,10 +200,10 @@ function update_tier(req) { if (bucket) { if (req.rpc_params.data_placement) { //Placement policy changes const desc_string = []; - let policy_type_change = String(tier.data_placement) === String(req.rpc_params.data_placement) ? 'No changes' : + const policy_type_change = String(tier.data_placement) === String(req.rpc_params.data_placement) ? 'No changes' : `Changed to ${req.rpc_params.data_placement} from ${tier.data_placement}`; - let removed_pools = _.difference(old_pool_names, req.rpc_params.attached_pools || []); - let added_pools = _.difference(req.rpc_params.attached_pools || [], old_pool_names); + const removed_pools = _.difference(old_pool_names, req.rpc_params.attached_pools || []); + const added_pools = _.difference(req.rpc_params.attached_pools || [], old_pool_names); desc_string.push(`Bucket policy was changed by: ${req.account && req.account.email.unwrap()}`); desc_string.push(`Policy type: ${policy_type_change}`); if (removed_pools.length) { @@ -454,7 +454,7 @@ function _is_change_in_tier(old_tier, new_tier) { } function update_chunk_config_for_bucket(req) { // please remove when CCC is per tier and not per policy - var bucket = req.system.buckets_by_name && req.system.buckets_by_name[req.rpc_params.bucket_name]; + const bucket = req.system.buckets_by_name && req.system.buckets_by_name[req.rpc_params.bucket_name]; if (!bucket || bucket.deleting) { dbg.error('BUCKET NOT FOUND', req.rpc_params.bucket_name); throw new RpcError('NO_SUCH_BUCKET', 'No such bucket: ' + req.rpc_params.bucket_name); @@ -478,7 +478,7 @@ function update_chunk_config_for_bucket(req) { // please remove when CCC is per } function add_tier_to_bucket(req) { - var bucket = req.system.buckets_by_name && req.system.buckets_by_name[req.rpc_params.bucket_name]; + const bucket = req.system.buckets_by_name && req.system.buckets_by_name[req.rpc_params.bucket_name]; if (!bucket || bucket.deleting) { dbg.error('BUCKET NOT FOUND', req.rpc_params.bucket_name); throw new RpcError('NO_SUCH_BUCKET', 'No such bucket: ' + req.rpc_params.bucket_name); @@ -492,7 +492,7 @@ function add_tier_to_bucket(req) { const mirrors = _convert_pools_to_data_placement_structure(policy_pool_ids, req.rpc_params.data_placement); const info = req.system.tiering_policies_by_name[policy.name.unwrap()]; const tier0_ccc = info.tiers[0].tier.chunk_config.chunk_coder_config; - let chunk_config = chunk_config_utils.resolve_chunk_config( + const chunk_config = chunk_config_utils.resolve_chunk_config( req.rpc_params.chunk_coder_config || tier0_ccc, req.account, req.system); const new_tier_name = bucket.name + '#' + Date.now().toString(36); @@ -549,8 +549,8 @@ function read_policy(req) { nodes_client.instance().aggregate_hosts_by_pool(pool_names, req.system._id), ]) .then(function(res) { - let nodes_aggregate_pool = res[0]; - let hosts_aggregate_pool = res[1]; + const nodes_aggregate_pool = res[0]; + const hosts_aggregate_pool = res[1]; return get_tiering_policy_info(policy, node_allocator.get_tiering_status(policy), nodes_aggregate_pool, @@ -691,7 +691,6 @@ function get_tier_info(tier, nodes_aggregate_pool, tiering_tier_status) { const mirror_groups = []; _.forEach(tier.mirrors, mirror => { - let spread_storage; const pools_storage = 
_.map(mirror.spread_pools, pool => _.defaults(_.get(nodes_aggregate_pool, ['groups', String(pool._id), 'storage']), { used: 0, @@ -703,7 +702,7 @@ function get_tier_info(tier, nodes_aggregate_pool, tiering_tier_status) { reserved: 0 }) ); - spread_storage = size_utils.reduce_storage(size_utils.reduce_sum, pools_storage); + const spread_storage = size_utils.reduce_storage(size_utils.reduce_sum, pools_storage); _.defaults(spread_storage, { used: 0, total: 0, @@ -798,7 +797,7 @@ function calc_tier_policy_status(tier, tier_info, extra_info) { if (tier_free.isZero()) { is_no_storage = true; } else { - let free_percent = tier_free.multiply(100).divide(tier_total); + const free_percent = tier_free.multiply(100).divide(tier_total); if (free_percent < 30) { is_storage_low = true; } diff --git a/src/server/utils/clustering_utils.js b/src/server/utils/clustering_utils.js index d60912abcc..b2dc9476bd 100644 --- a/src/server/utils/clustering_utils.js +++ b/src/server/utils/clustering_utils.js @@ -25,11 +25,11 @@ function get_topology() { } function update_host_address(address) { - var current_clustering = system_store.get_local_cluster_info(); + const current_clustering = system_store.get_local_cluster_info(); //TODO:: publish changes to cluster! _.each(current_clustering.shards, function(shard, i) { - var ind = _.findIndex(shard.servers, function(srv) { + const ind = _.findIndex(shard.servers, function(srv) { return srv.address === current_clustering.owner_address; }); @@ -61,7 +61,7 @@ function extract_servers_ip(arr) { //Return all servers in the cluster, regardless of role function get_all_cluster_members() { - let servers = system_store.data.clusters.map(top => top.owner_address); + const servers = system_store.data.clusters.map(top => top.owner_address); return servers; } @@ -75,7 +75,7 @@ function verify_cluster_id(cluster_id) { //Checks if current server is a stand-alone server function is_single_server() { - var top = get_topology(); + const top = get_topology(); if (!top.config_servers.length && top.shards.length === 1 && top.shard[0].servers.length === 1) { @@ -92,22 +92,22 @@ function pretty_topology(topology) { } function rs_array_changes(new_array, name, is_config) { - var current; + let current; if (is_config) { current = extract_servers_ip(get_topology().config_servers).sort(); } else { - var shard_idx = _.findIndex(get_topology().shards, function(s) { + const shard_idx = _.findIndex(get_topology().shards, function(s) { return name === s.shardname; }); current = extract_servers_ip(get_topology().shards[shard_idx].servers); } - var changes = Array.from(new_array).sort(); + const changes = Array.from(new_array).sort(); if (current.length !== changes.length) { return true; } - var changed = false; + let changed = false; _.each(current, function(c_srv, i) { if (c_srv !== changes[i]) { changed = true; @@ -119,7 +119,7 @@ function rs_array_changes(new_array, name, is_config) { } function find_shard_index(shardname) { - var shard_idx = _.findIndex(get_topology().shards, function(s) { + const shard_idx = _.findIndex(get_topology().shards, function(s) { return shardname === s.shardname; }); @@ -128,31 +128,31 @@ function find_shard_index(shardname) { function get_cluster_info() { const get_hb = true; - let local_info = system_store.get_local_cluster_info(get_hb); - let shards = local_info.shards.map(shard => ({ + const local_info = system_store.get_local_cluster_info(get_hb); + const shards = local_info.shards.map(shard => ({ shardname: shard.shardname, servers: [] })); // list online members 
accoring to local mongo rs status - let online_members = [local_info.owner_address]; + const online_members = [local_info.owner_address]; _.each(system_store.data.clusters, cinfo => { - let shard = shards.find(s => s.shardname === cinfo.owner_shardname); + const shard = shards.find(s => s.shardname === cinfo.owner_shardname); const memory = { total: 0, used: 0, free: 0 }; - let cpus = { + const cpus = { count: 0, usage: 0 }; let version = '0'; let is_connected = 'DISCONNECTED'; let hostname = os.hostname(); - let time_epoch = moment().unix(); - let location = cinfo.location; - let single_server = system_store.data.clusters.length === 1; + const time_epoch = moment().unix(); + const location = cinfo.location; + const single_server = system_store.data.clusters.length === 1; let storage = { total: 0, free: 0 @@ -178,7 +178,7 @@ function get_cluster_info() { const debug_time = cinfo.debug_mode ? Math.max(0, DEBUG_MODE_PERIOD - (Date.now() - cinfo.debug_mode)) : undefined; - let server_info = { + const server_info = { version: version, hostname: hostname, secret: cinfo.owner_secret, @@ -224,7 +224,7 @@ function get_cluster_info() { if (shard.servers.length < 3) { shard.high_availabilty = false; } else { - let num_connected = shard.servers.filter(server => server.status === 'CONNECTED').length; + const num_connected = shard.servers.filter(server => server.status === 'CONNECTED').length; // to be highly available the cluster must be able to stand a failure and still // have a majority to vote for a master. shard.high_availabilty = num_connected > (shard.servers.length + 1) / 2; @@ -233,7 +233,7 @@ function get_cluster_info() { const min_requirements = get_min_requirements(); // This is a fix for the buffer of 1GB that we take in config.js min_requirements.ram += size_utils.GIGABYTE; - let cluster_info = { + const cluster_info = { master_secret: _get_master_secret(), shards: shards, min_requirements @@ -248,7 +248,7 @@ function _get_master_secret() { function get_potential_masters() { //TODO: For multiple shards, this should probably change? - var masters = []; + const masters = []; _.each(get_topology().shards[0].servers, function(s) { masters.push({ address: s.address @@ -260,12 +260,12 @@ function get_potential_masters() { function send_master_update(is_master, master_address) { - let system = system_store.data.systems[0]; + const system = system_store.data.systems[0]; if (!system) return P.resolve(); - let hosted_agents_promise = is_master ? + const hosted_agents_promise = is_master ? 
server_rpc.client.hosted_agents.start() : server_rpc.client.hosted_agents.stop(); - let update_master_promise = server_rpc.client.redirector.publish_to_cluster({ + const update_master_promise = server_rpc.client.redirector.publish_to_cluster({ method_api: 'server_inter_process_api', method_name: 'update_master_change', target: '', // required but irrelevant diff --git a/src/server/utils/mongo_ctrl.js b/src/server/utils/mongo_ctrl.js index c33cfd3ae1..46415f55f3 100644 --- a/src/server/utils/mongo_ctrl.js +++ b/src/server/utils/mongo_ctrl.js @@ -33,7 +33,7 @@ MongoCtrl.prototype.init = function() { //TODO:: for detaching: add remove member from replica set & destroy shard MongoCtrl.prototype.add_replica_set_member = function(name, first_server, servers) { - let self = this; + const self = this; return self._remove_single_mongo_program() .then(() => self._add_replica_set_member_program(name, first_server)) .then(() => SupervisorCtl.apply_changes()) @@ -50,7 +50,7 @@ MongoCtrl.prototype.add_replica_set_member = function(name, first_server, server }; MongoCtrl.prototype.add_new_shard_server = function(name, first_shard) { - let self = this; + const self = this; return self._remove_single_mongo_program() .then(() => self._add_new_shard_program(name, first_shard)) .then(() => SupervisorCtl.apply_changes()) @@ -58,7 +58,7 @@ MongoCtrl.prototype.add_new_shard_server = function(name, first_shard) { }; MongoCtrl.prototype.add_new_mongos = function(cfg_array) { - let self = this; + const self = this; return P.resolve() .then(() => self._add_new_mongos_program(cfg_array)) .then(() => SupervisorCtl.apply_changes()) @@ -66,7 +66,7 @@ MongoCtrl.prototype.add_new_mongos = function(cfg_array) { }; MongoCtrl.prototype.add_new_config = function() { - let self = this; + const self = this; return self._add_new_config_program() .then(() => SupervisorCtl.apply_changes()) .then(() => P.delay(5000)); // TODO: find better solution @@ -114,10 +114,10 @@ MongoCtrl.prototype.get_hb_rs_status = function() { dbg.log0('got rs status from mongo:', status); if (status.ok) { // return rs status fields specified in HB schema (cluster_schema) - let rs_status = { + const rs_status = { set: status.set, members: status.members.map(member => { - let member_status = { + const member_status = { name: member.name, health: member.health, uptime: member.uptime, @@ -135,7 +135,7 @@ MongoCtrl.prototype.get_hb_rs_status = function() { }; MongoCtrl.prototype.add_mongo_monitor_program = function() { - let program_obj = {}; + const program_obj = {}; program_obj.name = 'mongo_monitor'; program_obj.stopsignal = 'KILL'; program_obj.killasgroup = 'true'; @@ -152,12 +152,12 @@ MongoCtrl.prototype.update_dotenv = function(name, IPs) { if (!process.env.MONGO_SSL_USER) { throw new Error('MONGO_SSL_USER is missing in .env'); } - let user_name = encodeURIComponent(process.env.MONGO_SSL_USER) + '@'; + const user_name = encodeURIComponent(process.env.MONGO_SSL_USER) + '@'; dbg.log0('will update dotenv for replica set', name, 'with IPs', IPs); - let servers_str = IPs.map(ip => ip + ':' + config.MONGO_DEFAULTS.SHARD_SRV_PORT).join(','); - let url = 'mongodb://' + user_name + servers_str + '/nbcore?replicaSet=' + name + + const servers_str = IPs.map(ip => ip + ':' + config.MONGO_DEFAULTS.SHARD_SRV_PORT).join(','); + const url = 'mongodb://' + user_name + servers_str + '/nbcore?replicaSet=' + name + '&readPreference=primaryPreferred&authMechanism=MONGODB-X509'; - let old_url = process.env.MONGO_RS_URL || ''; + const old_url = process.env.MONGO_RS_URL || 
''; dbg.log0('updating MONGO_RS_URL in .env from', old_url, 'to', url); dotenv.set({ key: 'MONGO_RS_URL', @@ -183,8 +183,8 @@ MongoCtrl.prototype.force_mongo_sync_journal = function() { //Internals // MongoCtrl.prototype._init_replica_set_from_shell = function(ip) { - let host = ip + ':' + config.MONGO_DEFAULTS.SHARD_SRV_PORT; - let mongo_shell_command = `mongo nbcore --port ${config.MONGO_DEFAULTS.SHARD_SRV_PORT}` + + const host = ip + ':' + config.MONGO_DEFAULTS.SHARD_SRV_PORT; + const mongo_shell_command = `mongo nbcore --port ${config.MONGO_DEFAULTS.SHARD_SRV_PORT}` + ` --eval "rs.initiate({_id: 'shard1',members: [{_id: 0,host: '${host}'}]})"`; dbg.log0(`init replica set: running command ${mongo_shell_command}`); return os_utils.exec(mongo_shell_command, { @@ -199,8 +199,8 @@ MongoCtrl.prototype._add_replica_set_member_program = async function(name, first throw new Error('port and name must be supplied to add new shard'); } - let program_obj = {}; - let dbpath = config.MONGO_DEFAULTS.COMMON_PATH + '/' + name + (first_server ? '' : 'rs'); + const program_obj = {}; + const dbpath = config.MONGO_DEFAULTS.COMMON_PATH + '/' + name + (first_server ? '' : 'rs'); // get uid and gid of common path, to set for new dbpath let stats; try { @@ -250,8 +250,8 @@ MongoCtrl.prototype._add_new_shard_program = function(name, first_shard) { throw new Error('port and name must be supplied to add new shard'); } - var program_obj = {}; - let dbpath = config.MONGO_DEFAULTS.COMMON_PATH + '/' + name; + const program_obj = {}; + const dbpath = config.MONGO_DEFAULTS.COMMON_PATH + '/' + name; program_obj.name = 'mongoshard-' + name; program_obj.command = 'mongod --shardsvr' + ' --replSet ' + name + @@ -287,7 +287,7 @@ MongoCtrl.prototype._add_new_mongos_program = function(cfg_array) { }); } - let program_obj = {}; + const program_obj = {}; program_obj.name = 'mongos'; program_obj.command = 'mongos --configdb ' + config_string; program_obj.directory = '/usr/bin'; @@ -301,8 +301,8 @@ MongoCtrl.prototype._add_new_mongos_program = function(cfg_array) { }; MongoCtrl.prototype._init_replica_set_from_shell = function(ip) { - let host = ip + ':' + config.MONGO_DEFAULTS.SHARD_SRV_PORT; - let mongo_shell_command = `mongo nbcore --port ${config.MONGO_DEFAULTS.SHARD_SRV_PORT} --ssl` + + const host = ip + ':' + config.MONGO_DEFAULTS.SHARD_SRV_PORT; + const mongo_shell_command = `mongo nbcore --port ${config.MONGO_DEFAULTS.SHARD_SRV_PORT} --ssl` + ` --sslPEMKeyFile ${config.MONGO_DEFAULTS.CLIENT_CERT_PATH}` + ` --sslCAFile ${config.MONGO_DEFAULTS.ROOT_CA_PATH} --sslAllowInvalidHostnames` + ` --eval "var host='${host}', user='${process.env.MONGO_SSL_USER}'"` + @@ -315,8 +315,8 @@ MongoCtrl.prototype._init_replica_set_from_shell = function(ip) { }; MongoCtrl.prototype._add_new_config_program = function() { - let program_obj = {}; - let dbpath = config.MONGO_DEFAULTS.CFG_DB_PATH; + const program_obj = {}; + const dbpath = config.MONGO_DEFAULTS.CFG_DB_PATH; program_obj.name = 'mongocfg'; program_obj.command = 'mongod --configsvr ' + ' --replSet ' + config.MONGO_DEFAULTS.CFG_RSET_NAME + diff --git a/src/server/utils/server_diagnostics.js b/src/server/utils/server_diagnostics.js index 062c1a6cb0..3e108ecd6a 100644 --- a/src/server/utils/server_diagnostics.js +++ b/src/server/utils/server_diagnostics.js @@ -217,7 +217,7 @@ function collect_statistics(req) { }) .then(function(restats) { if (stats_aggregator) { - var stats_data = JSON.stringify(restats); + const stats_data = JSON.stringify(restats); return 
fs.promises.writeFile(TMP_WORK_DIR + '/phone_home_stats.out', stats_data); } }) diff --git a/src/server/utils/supervisor_ctrl.js b/src/server/utils/supervisor_ctrl.js index 0ca1fec074..a1a8ba6dba 100644 --- a/src/server/utils/supervisor_ctrl.js +++ b/src/server/utils/supervisor_ctrl.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var fs = require('fs'); -var P = require('../../util/promise'); -var os_utils = require('../../util/os_utils'); -var config = require('../../../config.js'); +const _ = require('lodash'); +const fs = require('fs'); +const P = require('../../util/promise'); +const os_utils = require('../../util/os_utils'); +const config = require('../../../config.js'); class SupervisorCtrl { constructor() { @@ -103,7 +103,7 @@ class SupervisorCtrl { if (!this._supervised) { return; } - let ind = _.findIndex(this._programs, function(prog) { + const ind = _.findIndex(this._programs, function(prog) { return prog.name === (prog_name); }); //don't fail on removing non existent program @@ -138,7 +138,7 @@ class SupervisorCtrl { } get_mongo_services() { - let mongo_progs = []; + const mongo_progs = []; return P.resolve() .then(() => this.init()) .then(() => { @@ -164,7 +164,7 @@ class SupervisorCtrl { } add_agent(agent_name, args_str) { - let prog = {}; + const prog = {}; prog.directory = config.SUPERVISOR_DEFAULTS.DIRECTORY; prog.stopsignal = config.SUPERVISOR_DEFAULTS.STOPSIGNAL; prog.command = '/usr/local/bin/node src/agent/agent_cli.js ' + args_str; @@ -217,17 +217,17 @@ class SupervisorCtrl { } this._programs = []; //run target by target and create the services structure - var programs = _.split(data, config.SUPERVISOR_PROGRAM_SEPERATOR); + const programs = _.split(data, config.SUPERVISOR_PROGRAM_SEPERATOR); _.each(programs, p => { - let program_obj = {}; - let lines = _.split(p, '\n'); + const program_obj = {}; + const lines = _.split(p, '\n'); _.each(lines, function(l) { // For non empty lines if (l.length !== 0) { if (l[0] === '[') { program_obj.name = l.slice(l.indexOf(':') + 1, l.indexOf(']')); } else { - let parts = _.split(l, '='); + const parts = _.split(l, '='); program_obj[parts[0]] = parts[1]; } } diff --git a/src/server/web_server.js b/src/server/web_server.js index 0427540247..fa1215e02c 100755 --- a/src/server/web_server.js +++ b/src/server/web_server.js @@ -43,7 +43,7 @@ const dev_mode = (process.env.DEV_MODE === 'true'); const app = express(); if (process.env.NOOBAA_LOG_LEVEL) { - let dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); + const dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); dbg_conf.core.map(module => dbg.set_module_level(dbg_conf.level, module)); } @@ -87,7 +87,7 @@ async function start_web_server() { // we register the rpc before listening on the port // in order for the rpc services to be ready immediately // with the http services like /version - var http_server = http.createServer(app); + const http_server = http.createServer(app); server_rpc.rpc.register_ws_transport(http_server); await P.ninvoke(http_server, 'listen', http_port); @@ -124,12 +124,12 @@ app.use(function(req, res, next) { // however our nodejs server is always http so the flag is false, // and on heroku only the router does ssl, // so we need to pull the heroku router headers to check. 
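The web_server.js hunk just below swaps `var` declarations that live inside an `if` block; unlike `const`, `var` is function-scoped and hoists out of the block. A small sketch under assumed names (nothing here comes from the patch itself):

// Illustrative only -- hypothetical names.
function scope_demo(proto) {
    if (proto === 'http') {
        var leaked = 'https';   // var: hoisted to the whole function
        const scoped = 'https'; // const: exists only inside this block
    }
    // `leaked` is reachable here ('https' when the branch ran, undefined otherwise);
    // `scoped` is not declared in this outer scope at all.
    return typeof leaked;
}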
- var fwd_proto = req.get('X-Forwarded-Proto'); + const fwd_proto = req.get('X-Forwarded-Proto'); // var fwd_port = req.get('X-Forwarded-Port'); // var fwd_from = req.get('X-Forwarded-For'); // var fwd_start = req.get('X-Request-Start'); if (fwd_proto === 'http') { - var host = req.get('Host'); + const host = req.get('Host'); return res.redirect('https://' + host + req.originalUrl); } return next(); @@ -158,8 +158,8 @@ app.get('/', function(req, res) { app.get('/get_latest_version*', function(req, res) { if (req.params[0].indexOf('&curr=') !== -1) { try { - var query_version = req.params[0].substr(req.params[0].indexOf('&curr=') + 6); - var ret_version = ''; + const query_version = req.params[0].substr(req.params[0].indexOf('&curr=') + 6); + let ret_version = ''; if (!is_latest_version(query_version)) { ret_version = config.on_premise.base_url + process.env.CURRENT_VERSION + '/' + config.on_premise.nva_part; @@ -201,7 +201,7 @@ app.post('/set_log_level*', function(req, res) { //Log level getter app.get('/get_log_level', function(req, res) { - var all_modules = util.inspect(dbg.get_module_structure(), true, 20); + const all_modules = util.inspect(dbg.get_module_structure(), true, 20); res.status(200).send({ all_levels: all_modules, @@ -337,7 +337,7 @@ function _create_nsfs_report() { // since we usually have less routes then files, and the routes are in memory. function cache_control(seconds) { - var millis = 1000 * seconds; + const millis = 1000 * seconds; return function(req, res, next) { res.setHeader("Cache-Control", "public, max-age=" + seconds); res.setHeader("Expires", new Date(Date.now() + millis).toUTCString()); @@ -361,7 +361,7 @@ app.use('/public/audit.csv', express.static(path.join('/log', 'audit.csv'))); app.use(error_404); app.use(function(err, req, res, next) { console.error('ERROR:', err); - var e; + let e; if (dev_mode) { // show internal info only on development e = err; @@ -375,7 +375,7 @@ app.use(function(err, req, res, next) { res.status(e.statusCode); if (can_accept_html(req)) { - var ctx = { //common_api.common_server_data(req); + const ctx = { //common_api.common_server_data(req); data: {} }; if (dev_mode) { @@ -425,17 +425,17 @@ function can_accept_html(req) { // Check if given version is the latest version, or are there newer ones // Version is in the form of X.Y.Z, start checking from left to right function is_latest_version(query_version) { - var srv_version = process.env.CURRENT_VERSION; + const srv_version = process.env.CURRENT_VERSION; console.log('Checking version', query_version, 'against', srv_version); if (query_version === srv_version) { return true; } - var srv_version_parts = srv_version.toString().split('.'); - var query_version_parts = query_version.split('.'); + const srv_version_parts = srv_version.toString().split('.'); + const query_version_parts = query_version.split('.'); - var len = Math.min(srv_version_parts.length, query_version_parts.length); + const len = Math.min(srv_version_parts.length, query_version_parts.length); // Compare common parts for (let i = 0; i < len; i++) { diff --git a/src/test/framework/consolidate_test_reports.js b/src/test/framework/consolidate_test_reports.js index 404ddc7885..61e94da8dd 100644 --- a/src/test/framework/consolidate_test_reports.js +++ b/src/test/framework/consolidate_test_reports.js @@ -32,7 +32,7 @@ class ConsolidateReports { } async _fetch_data() { - let start_date = new Date(); + const start_date = new Date(); start_date.setDate(start_date.getDate() - this._date_back_offset_days); try { const raw_data 
= await this._db.collection('reports').aggregate([ diff --git a/src/test/framework/convert.js b/src/test/framework/convert.js index eacc08447d..ccb0188ea8 100644 --- a/src/test/framework/convert.js +++ b/src/test/framework/convert.js @@ -8,7 +8,7 @@ const argv = require('minimist')(process.argv); let res_files = []; -let { +const { result, output_file = `./out_test.csv` } = argv; diff --git a/src/test/framework/report.js b/src/test/framework/report.js index 52de74ae3f..c67a20e0f8 100644 --- a/src/test/framework/report.js +++ b/src/test/framework/report.js @@ -166,7 +166,7 @@ Didn't Run: ${JSON.stringify( await this._mongo_client.db().collection('reports').insert(payload); console.info('report sent to remote mongo'); } else if (process.env.SEND_REPORT) { - var options = { + const options = { uri: 'http://' + this.host + ':' + this.port, method: 'POST', json: payload diff --git a/src/test/lambda/delete_backup_file_func.js b/src/test/lambda/delete_backup_file_func.js index 9ca8b6b666..38779f86ea 100644 --- a/src/test/lambda/delete_backup_file_func.js +++ b/src/test/lambda/delete_backup_file_func.js @@ -1,12 +1,12 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var AWS = require('aws-sdk'); +const AWS = require('aws-sdk'); exports.handler = function(event, context, callback) { - var srcBucket = event.Records[0].s3.bucket.name; - var key = event.Records[0].s3.object.key; - var s3 = new AWS.S3(); + const srcBucket = event.Records[0].s3.bucket.name; + const key = event.Records[0].s3.object.key; + const s3 = new AWS.S3(); s3.deleteObject({ Bucket: srcBucket, diff --git a/src/test/lambda/denial_of_service_func.js b/src/test/lambda/denial_of_service_func.js index 06b0714e1d..1fba8a9a07 100644 --- a/src/test/lambda/denial_of_service_func.js +++ b/src/test/lambda/denial_of_service_func.js @@ -4,19 +4,19 @@ const AWS = require('aws-sdk'); exports.handler = function(event, context, callback) { - var start = Date.now(); - var end = start + event.time; - var num_calls = 0; - var num_errors = 0; - var took = 0; - var lambda = new AWS.Lambda(event.lambda_conf); + const start = Date.now(); + const end = start + event.time; + let num_calls = 0; + let num_errors = 0; + let took = 0; + const lambda = new AWS.Lambda(event.lambda_conf); - for (var i = 0; i < event.concur; ++i) { + for (let i = 0; i < event.concur; ++i) { worker(); } function worker() { - var now = Date.now(); + const now = Date.now(); if (now >= end) { return callback(null, { num_calls: num_calls, diff --git a/src/test/lambda/word_count_func.js b/src/test/lambda/word_count_func.js index e5b6e52df9..ea006539db 100644 --- a/src/test/lambda/word_count_func.js +++ b/src/test/lambda/word_count_func.js @@ -1,12 +1,12 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var http = require('http'); -var https = require('https'); -var crypto = require('crypto'); +const http = require('http'); +const https = require('https'); +const crypto = require('crypto'); exports.handler = function(event, context, callback) { - var text = ''; + let text = ''; if (event.random) { text = random_text(event.random); @@ -26,7 +26,7 @@ exports.handler = function(event, context, callback) { text += data; }) .once('end', () => { - var reply = count_text(text, event.return_text); + const reply = count_text(text, event.return_text); reply.status_code = res.statusCode; reply.headers = res.headers; callback(null, reply); @@ -40,8 +40,8 @@ exports.handler = function(event, context, callback) { }; function count_text(text, return_text) { - var words = text.match(/\S+/g); - var lines 
= text.match(/\n/g); + const words = text.match(/\S+/g); + const lines = text.match(/\n/g); return { bytes: Buffer.byteLength(text), chars: text.length, @@ -52,15 +52,15 @@ function count_text(text, return_text) { } function random_text(length) { - var str = ''; - var WORDSET = 'abcdefghijklmnopqrstuvwxyz'; - var CHARSET = WORDSET + ' '.repeat(0.2 * WORDSET.length) + '\n'.repeat(0.1 * WORDSET.length); - var cipher = crypto.createCipheriv('aes-128-gcm', crypto.randomBytes(16), crypto.randomBytes(12)); - var zero_buf = Buffer.alloc(Math.min(1024, length)); + let str = ''; + const WORDSET = 'abcdefghijklmnopqrstuvwxyz'; + const CHARSET = WORDSET + ' '.repeat(0.2 * WORDSET.length) + '\n'.repeat(0.1 * WORDSET.length); + const cipher = crypto.createCipheriv('aes-128-gcm', crypto.randomBytes(16), crypto.randomBytes(12)); + const zero_buf = Buffer.alloc(Math.min(1024, length)); while (length > 0) { - var rand_buf = cipher.update(zero_buf); - for (var i = 0; i < rand_buf.length; ++i) { - var b = rand_buf[i]; + const rand_buf = cipher.update(zero_buf); + for (let i = 0; i < rand_buf.length; ++i) { + const b = rand_buf[i]; str += CHARSET[b % CHARSET.length]; } length -= zero_buf.length; diff --git a/src/test/pipeline/account_test.js b/src/test/pipeline/account_test.js index 30a8c564e1..214b1aa80b 100644 --- a/src/test/pipeline/account_test.js +++ b/src/test/pipeline/account_test.js @@ -14,7 +14,7 @@ dbg.set_process_name(test_name); let rpc; let client; -let errors = []; +const errors = []; let failures_in_test = false; const DEFAULT_EMAIL = 'demo@noobaa.com'; @@ -45,10 +45,10 @@ const YELLOW = "\x1b[33;1m"; const RED = "\x1b[31;1m"; const NC = "\x1b[0m"; -let TEST_CFG = _.defaults(_.pick(argv, _.keys(TEST_CFG_DEFAULTS)), TEST_CFG_DEFAULTS); +const TEST_CFG = _.defaults(_.pick(argv, _.keys(TEST_CFG_DEFAULTS)), TEST_CFG_DEFAULTS); Object.freeze(TEST_CFG); -let report = new Report(); +const report = new Report(); function usage() { console.log(` @@ -151,8 +151,8 @@ function set_account_details(has_login, account_name, email, s3_access) { async function create_account(has_login, account_name) { //building an account parameters object. 
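Several of the test files in this patch (account_test.js above among them) turn accumulator arrays such as `errors` from `let` into `const`. That is safe because `const` freezes the binding, not the array; a hedged sketch:

// Illustrative only -- `const` forbids reassignment, not mutation.
const errors = [];
errors.push('first failure'); // OK: mutates the array in place
errors.length = 0;            // OK: still the same binding
// errors = [];               // TypeError: Assignment to constant variable.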
console.log(`Creating account: ${account_name} with access login: ${has_login} s3 access: ${TEST_CFG.s3_access}`); - let email = account_name + TEST_CFG.emailSuffix; - let accountData = set_account_details(has_login, account_name, email, TEST_CFG.s3_access); + const email = account_name + TEST_CFG.emailSuffix; + const accountData = set_account_details(has_login, account_name, email, TEST_CFG.s3_access); try { await client.account.create_account(accountData); await report.success('create_account'); diff --git a/src/test/pipeline/dataset.js b/src/test/pipeline/dataset.js index da44c4c501..c5a46a8cf9 100644 --- a/src/test/pipeline/dataset.js +++ b/src/test/pipeline/dataset.js @@ -88,7 +88,7 @@ const s3ops = new S3OPS({ update_dataset_sizes(); -let report = new Report(); +const report = new Report(); const cases = [ 'RANDOM', 'UPLOAD_NEW', @@ -184,7 +184,7 @@ const ACTION_TYPES = [{ randomizer: multi_delete_randomizer }]; -let RANDOM_SELECTION = []; +const RANDOM_SELECTION = []; /* Populate array for random selection according to desired weights @@ -193,7 +193,7 @@ function populate_random_selection() { for (let i = 0; i < ACTION_TYPES.length; ++i) { const selected = ACTION_TYPES[i]; if (selected.include_random) { - let calc_weight = Math.floor(selected.weight); + const calc_weight = Math.floor(selected.weight); if (!selected.randomizer) { console.error(`ACTION ${selected.name} does not include a randomizer, cannot create setup`); } @@ -296,7 +296,7 @@ function get_filename() { } else if (TEST_CFG.max_depth === 0) { file_name = `${DATASET_NAME}_`; } else { - let random_max_depth = Math.floor(Math.random() * (TEST_CFG.max_depth + 1)); + const random_max_depth = Math.floor(Math.random() * (TEST_CFG.max_depth + 1)); console.log(`random_max_depth: ${random_max_depth}`); if (random_max_depth <= TEST_CFG.min_depth) { if (random_max_depth === 0) { @@ -392,7 +392,7 @@ async function read(params) { } async function read_range_randomizer() { - let rand_parts = (Math.floor(Math.random() * (TEST_CFG.part_num_high - TEST_CFG.part_num_low)) + + const rand_parts = (Math.floor(Math.random() * (TEST_CFG.part_num_high - TEST_CFG.part_num_low)) + TEST_CFG.part_num_low); const randomFile = await get_random_file(); console.info(`Selected to read_range: ${randomFile.filename}, size: ${randomFile.extra && randomFile.extra.size}, with ${ @@ -409,7 +409,7 @@ async function read_range(params) { async function upload_new_randomizer() { let is_multi_part = Math.floor(Math.random() * 2) === 0; - let rand_size = set_fileSize(); + const rand_size = set_fileSize(); let file_name; let rand_parts; if (is_multi_part) { @@ -430,7 +430,7 @@ async function upload_new_randomizer() { console.log('Uploading a new key'); file_name = get_filename(); } - let res = { + const res = { is_multi_part, rand_size, file_name, @@ -488,8 +488,8 @@ async function upload_new(params) { } function upload_abort_randomizer() { - let file_name = get_filename(); //No versionid for uploads, no need to handle versioning - let res = { + const file_name = get_filename(); //No versionid for uploads, no need to handle versioning + const res = { is_multi_part: true, file_name, }; @@ -508,7 +508,7 @@ async function upload_and_abort(params) { async function upload_overwrite_randomizer() { //upload overwrite in a versioning case would simply create a new version, no need for special handling - let rand_size = set_fileSize(); + const rand_size = set_fileSize(); let is_multi_part = Math.floor(Math.random() * 2) === 0; let rand_parts; if (is_multi_part) { @@ -520,7 
+520,7 @@ async function upload_overwrite_randomizer() { is_multi_part = false; } const rfile = await s3ops.get_a_random_file(TEST_CFG.bucket, DATASET_NAME); - let res = { + const res = { is_multi_part, rand_size, filename: rfile.Key, @@ -634,7 +634,7 @@ async function set_attribute_randomizer() { // putObjectTagging - 50% // copyObject - 50% // let useCopy = Math.floor(Math.random() * 2) === 0; - let useCopy = true; //currently doing only copy due to bug #3228 + const useCopy = true; //currently doing only copy due to bug #3228 const randomFile = await get_random_file(); return { @@ -776,7 +776,7 @@ function run_test(throw_on_fail) { } function run_replay() { - let journal = []; + const journal = []; const readfile = readline.createInterface({ input: fs.createReadStream(argv.replay), terminal: false diff --git a/src/test/pipeline/namespace_cache_range_read_test.js b/src/test/pipeline/namespace_cache_range_read_test.js index 92e0c78fe1..730ec55a51 100644 --- a/src/test/pipeline/namespace_cache_range_read_test.js +++ b/src/test/pipeline/namespace_cache_range_read_test.js @@ -41,7 +41,7 @@ async function test_case_range_read_initial_read_size_not_across_blocks({ type, start: time_start, } }); - let time_end = (new Date()).getTime(); + const time_end = (new Date()).getTime(); // Expect block1 will be cached, so we will have block1 and block3 cached // blocks : | b0 | b1(to be cached) | b2 | b3(cached) | ..... @@ -163,7 +163,7 @@ async function test_case_range_read_range_variations({ type, ns_context }) { const { block_size, block_size_kb } = ns_context; const size_block_count = 4; const file_name = `${prefix}_${block_size_kb * size_block_count}_KB`; - let time_start = (new Date()).getTime(); + const time_start = (new Date()).getTime(); // Expect block3 will be cached // blocks : | b0 | b1(to be cached) | b2(to be cached) | b3 | @@ -302,7 +302,7 @@ async function test_case_range_read_from_entire_object_to_partial({ type, ns_con // blocks : | b0 | b1(to be cached) | b2 | // read range : <--> let range_size = 100; - let start = block_size + 100; + const start = block_size + 100; let end = start + range_size - 1; // Read the same range twice. 
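Loop headers show the other side of the rule: a classic counter such as the `for (let i = 0; i < 2; i++)` loop that follows here is reassigned on every pass and must keep `let`, while `for...of` creates a fresh binding per iteration and converts cleanly. A minimal sketch, not code from this patch:

// Illustrative only.
for (let i = 0; i < 3; i++) {    // i++ reassigns -> must stay let
    console.log(i);
}
for (const item of ['a', 'b']) { // fresh binding each pass -> const is fine
    console.log(item);
}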
for (let i = 0; i < 2; i++) { @@ -360,12 +360,12 @@ async function test_case_range_read_from_partial_to_entire_object({ type, ns_con // Expect block1 to be cached // blocks : | b0 | b1(to be cached) | b2 | // read range : <--> - let range_size = 100; + const range_size = 100; let start = block_size + 100; let end = start + range_size - 1; const time_start = (new Date()).getTime(); await ns_context.upload_directly_to_cloud(type, file_name); - let cloud_obj_md = await ns_context.get_via_cloud(type, file_name); + const cloud_obj_md = await ns_context.get_via_cloud(type, file_name); await ns_context.validate_range_read({ type, file_name, cloud_obj_md, @@ -452,7 +452,7 @@ async function test_case_range_read_small_file({ type, ns_context }) { let range_size = 100; let start = config.INLINE_MAX_SIZE; let end = start + range_size - 1; - let time_start = (new Date()).getTime(); + const time_start = (new Date()).getTime(); await ns_context.upload_directly_to_cloud(type, file_name); const cloud_obj_md = await ns_context.get_via_cloud(type, file_name); @@ -510,7 +510,7 @@ async function test_case_range_read_if_match_etag_failure({ type, ns_context }) // Expect block1 to be cached // blocks : | b0 | b1(to be cached) | b2 | // read range : <--> - let range_size = 100; + const range_size = 100; let start = block_size + 100; let end = start + range_size - 1; let time_start = (new Date()).getTime(); diff --git a/src/test/pipeline/namespace_test.js b/src/test/pipeline/namespace_test.js index 4e771302de..66d16a3af4 100644 --- a/src/test/pipeline/namespace_test.js +++ b/src/test/pipeline/namespace_test.js @@ -40,8 +40,8 @@ if (cloud_list.length === 0) { process.exit(0); } -let errors = []; -let failures_in_test = false; +const errors = []; +const failures_in_test = false; //define colors // const YELLOW = "\x1b[33;1m"; @@ -82,7 +82,7 @@ if (help) { const rpc = api.new_rpc_from_base_address(`wss://${mgmt_ip}:${mgmt_port_https}`, 'EXTERNAL'); const client = rpc.new_client({}); -let report = new Report(); +const report = new Report(); const cases = [ 'read via namespace AWS', @@ -110,7 +110,7 @@ const cases = [ ]; report.init_reporter({ suite: test_name, conf: { aws: true, azure: true }, mongo_report: true, cases: cases }); -let cf = new CloudFunction(client); +const cf = new CloudFunction(client); const bucket_functions = new BucketFunctions(client); const AWSDefaultConnection = cf.getAWSConnection(); @@ -257,7 +257,7 @@ async function isFilesAvailableInNooBaaBucket(gateway, files, type) { } async function uploadFileToNoobaaS3(bucket, file_name) { - let { data_multiplier } = unit_mapping.KB; + const { data_multiplier } = unit_mapping.KB; try { await s3ops.put_file_with_md5(bucket, file_name, 15, data_multiplier); } catch (err) { @@ -277,7 +277,7 @@ async function _delete_namesapace_bucket(bucket) { } async function set_rpc_and_create_auth_token() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' diff --git a/src/test/pipeline/quota_test.js b/src/test/pipeline/quota_test.js index 262c7a3784..31db5b03d2 100644 --- a/src/test/pipeline/quota_test.js +++ b/src/test/pipeline/quota_test.js @@ -71,7 +71,7 @@ if (argv.help) { const rpc = api.new_rpc_from_base_address(`wss://${mgmt_ip}:${mgmt_port_https}`, 'EXTERNAL'); const client = rpc.new_client({}); -let report = new Report(); +const report = new Report(); const cases = [ 'fail upload over quota', diff --git a/src/test/pipeline/system_config.js b/src/test/pipeline/system_config.js index 2cd631edac..cb132648ab 
100644 --- a/src/test/pipeline/system_config.js +++ b/src/test/pipeline/system_config.js @@ -17,7 +17,7 @@ let rpc; let client; let failures_in_test = false; -let errors = []; +const errors = []; const { mgmt_ip, @@ -47,7 +47,7 @@ if (help) { process.exit(1); } -let report = new Report(); +const report = new Report(); const cases = [ 'set_maintenance_mode', 'update_n2n_config_single_port', @@ -110,9 +110,9 @@ async function update_n2n_config_and_check_single_port(port) { tcp_permanent_passive: { port } } }); - let system_info = await client.system.read_system({}); + const system_info = await client.system.read_system({}); const tcp_port = system_info.n2n_config.tcp_permanent_passive.port; - let n2n_config = JSON.stringify(system_info.n2n_config); + const n2n_config = JSON.stringify(system_info.n2n_config); if (tcp_port === port) { console.log(`The single tcp port is: ${port} - as should`); await report.success(`update_n2n_config_single_port`); diff --git a/src/test/qa/agents_matrix.js b/src/test/qa/agents_matrix.js index c5cadfbf0f..b70c3d26f1 100644 --- a/src/test/qa/agents_matrix.js +++ b/src/test/qa/agents_matrix.js @@ -50,8 +50,8 @@ const api = require('../../api'); const rpc = api.new_rpc_from_base_address(`wss://${mgmt_ip}:${mgmt_port}`, 'EXTERNAL'); const client = rpc.new_client({}); -let nodes = []; -let errors = []; +const nodes = []; +const errors = []; function saveErrorAndExit(message) { console.error(message); diff --git a/src/test/qa/cloud_test.js b/src/test/qa/cloud_test.js index 55eb18ee35..6ef020d018 100644 --- a/src/test/qa/cloud_test.js +++ b/src/test/qa/cloud_test.js @@ -157,7 +157,7 @@ let remote_bucket_names = []; const cloudPoolForCompatible = 'AZURE-for-compatible'; async function set_rpc_and_create_auth_token(client_to_auth) { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' diff --git a/src/test/qa/data_availability_test.js b/src/test/qa/data_availability_test.js index 93f60e6f2d..06e7c69a4c 100644 --- a/src/test/qa/data_availability_test.js +++ b/src/test/qa/data_availability_test.js @@ -11,8 +11,8 @@ const test_utils = require('../system_tests/test_utils'); dbg.set_process_name('data_availability'); -let files = []; -let errors = []; +const files = []; +const errors = []; let current_size = 0; const POOL_NAME = "first-pool"; let failures_in_test = false; @@ -73,7 +73,7 @@ if (help) { const rpc = api.new_rpc_from_base_address(`wss://${mgmt_ip}:${mgmt_port_https}`, 'EXTERNAL'); const client = rpc.new_client({}); -let report = new Report(); +const report = new Report(); //Define test cases const cases = [ 'verify file availability', @@ -144,13 +144,13 @@ function set_fileSize() { } async function uploadAndVerifyFiles() { - let { data_multiplier } = unit_mapping.MB; + const { data_multiplier } = unit_mapping.MB; console.log('Writing and deleting data till size amount to grow ' + dataset_size + ' MB'); while (current_size < dataset_size) { try { console.log('Uploading files till data size grow to ' + dataset_size + ', current size is ' + current_size); - let file_size = set_fileSize(); - let file_name = 'file_part_' + file_size + (Math.floor(Date.now() / 1000)); + const file_size = set_fileSize(); + const file_name = 'file_part_' + file_size + (Math.floor(Date.now() / 1000)); files.push(file_name); current_size += file_size; console.log('Uploading file with size ' + file_size + ' MB'); @@ -192,7 +192,7 @@ async function stopAgentsAndCheckFiles() { } async function set_rpc_and_create_auth_token() { - let 
auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' diff --git a/src/test/qa/data_resiliency_test.js b/src/test/qa/data_resiliency_test.js index df66411243..7f4548994e 100644 --- a/src/test/qa/data_resiliency_test.js +++ b/src/test/qa/data_resiliency_test.js @@ -11,8 +11,8 @@ dbg.set_process_name('data_availability'); let failures_in_test = false; const POOL_NAME = "first-pool"; -let errors = []; -let files = []; +const errors = []; +const files = []; let current_size = 0; //defining the required parameters @@ -120,13 +120,13 @@ function set_fileSize() { } async function _uploadAndVerifyFiles() { - let { data_multiplier } = unit_mapping.MB; + const { data_multiplier } = unit_mapping.MB; console.log('Writing and deleting data till size amount to grow ' + dataset_size + ' MB'); while (current_size < dataset_size) { try { console.log('Uploading files till data size grow to ' + dataset_size + ', current size is ' + current_size); - let file_size = set_fileSize(); - let file_name = 'file_part_' + file_size + (Math.floor(Date.now() / 1000)); + const file_size = set_fileSize(); + const file_name = 'file_part_' + file_size + (Math.floor(Date.now() / 1000)); files.push(file_name); current_size += file_size; console.log('Uploading file with size ' + file_size + ' MB'); diff --git a/src/test/qa/load.js b/src/test/qa/load.js index 48b82ffefe..d56dbcf46a 100644 --- a/src/test/qa/load.js +++ b/src/test/qa/load.js @@ -16,7 +16,7 @@ mocha.describe('UPLOAD TESTS:', function() { let index = 0; [basic_size, 10 * basic_size, 100 * basic_size].forEach(function(file_size) { - let file_name = basic_name + index; + const file_name = basic_name + index; index += 1; mocha.it('Upload single file of size:' + file_size + ' MB in one thread', async function() { @@ -35,10 +35,10 @@ mocha.describe('UPLOAD TESTS:', function() { let file_name = 'file_' + (Math.floor(Date.now() / 1000) + 1); let file_size = 10; - let num_of_files = 10; + const num_of_files = 10; mocha.it('Upload multiple file of size:' + file_size + ' MB in different threads', function() { - let promises = []; + const promises = []; console.info('> Uploading ' + num_of_files + ' files to Noobaa in differnet threads'); for (let i = 0; i < num_of_files; i++) { console.info('* Uploading file number ' + (i + 1) + ' out of ' + num_of_files + ' named: ' + (file_name + i)); @@ -50,7 +50,7 @@ mocha.describe('UPLOAD TESTS:', function() { return_stdout: true })) .then(reply => { - var num_of_created = reply.split(/\r\n|\r|\n/).length - 1; + const num_of_created = reply.split(/\r\n|\r|\n/).length - 1; assert.equal(num_of_created, num_of_files, 'Not all the files were created!, only ' + num_of_created); console.info('> Found ' + num_of_created + ' new files in NooBaa as should'); }); @@ -61,8 +61,8 @@ mocha.describe('UPLOAD TESTS:', function() { file_name = 'file_' + (Math.floor(Date.now() / 1000) + 2); file_size = 512; // 1/2GB - let concur = 10; // number of multiparts used - let part_size = Math.floor(file_size / concur); + const concur = 10; // number of multiparts used + const part_size = Math.floor(file_size / concur); mocha.it('Upload one big file ' + file_size + ' using multi part', async function() { console.info('> Uploading file: ' + file_name + ' to Noobaa with size of:' + file_size + ' MB'); diff --git a/src/test/qa/rebuild_replicas_test.js b/src/test/qa/rebuild_replicas_test.js index 395abd3414..3a0eadae28 100644 --- a/src/test/qa/rebuild_replicas_test.js +++ b/src/test/qa/rebuild_replicas_test.js @@ -11,8 
+11,8 @@ const dbg = require('../../util/debug_module')(__filename); const { BucketFunctions } = require('../utils/bucket_functions'); dbg.set_process_name('rebuild_replicas'); -let files = []; -let errors = []; +const files = []; +const errors = []; const POOL_NAME = "first-pool"; //defining the required parameters @@ -62,7 +62,7 @@ if (help) { const rpc = api.new_rpc_from_base_address(`wss://${mgmt_ip}:${mgmt_port_https}`, 'EXTERNAL'); const client = rpc.new_client({}); -let report = new Report(); +const report = new Report(); //Define test cases const cases = [ 'correct num replicas after node failure', @@ -115,17 +115,17 @@ function saveErrorAndResume(message) { } async function uploadAndVerifyFiles(num_agents) { - let { data_multiplier } = unit_mapping.MB; + const { data_multiplier } = unit_mapping.MB; // 1/2 GB per agent. 1 GB seems like too much memory for the lg to handle - let dataset_size = num_agents * 128; - let parts = 20; - let partSize = dataset_size / parts; - let file_size = Math.floor(partSize); + const dataset_size = num_agents * 128; + const parts = 20; + const partSize = dataset_size / parts; + const file_size = Math.floor(partSize); let part = 0; console.log('Writing and deleting data till size amount to grow ' + num_agents + ' GB'); try { while (part < parts) { - let file_name = 'file_part_' + part + file_size + (Math.floor(Date.now() / 1000)); + const file_name = 'file_part_' + part + file_size + (Math.floor(Date.now() / 1000)); files.push(file_name); console.log('files list is ' + files); part += 1; @@ -141,7 +141,7 @@ async function uploadAndVerifyFiles(num_agents) { async function readFiles() { try { - for (let file of files) { + for (const file of files) { await s3ops.get_file_check_md5(bucket, file); } } catch (err) { @@ -252,7 +252,7 @@ async function stopAgentAndCheckRebuildReplicas() { } async function set_rpc_and_create_auth_token() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' diff --git a/src/test/qa/reclaim_test.js b/src/test/qa/reclaim_test.js index 763ac172f6..ec52d9c1ba 100644 --- a/src/test/qa/reclaim_test.js +++ b/src/test/qa/reclaim_test.js @@ -12,8 +12,8 @@ const { BucketFunctions } = require('../utils/bucket_functions'); const test_name = 'reclaim'; dbg.set_process_name(test_name); -let files = []; -let errors = []; +const files = []; +const errors = []; let current_size = 0; const POOL_NAME = "first-pool"; @@ -52,7 +52,7 @@ if (argv.help) { const rpc = api.new_rpc_from_base_address(`wss://${mgmt_ip}:${mgmt_port_https}`, 'EXTERNAL'); const client = rpc.new_client({}); -let report = new Report(); +const report = new Report(); //Define test cases const cases = [ 'reclaimed blocks', @@ -67,7 +67,7 @@ report.init_reporter({ cases: cases }); -let bucket_functions = new BucketFunctions(client); +const bucket_functions = new BucketFunctions(client); const baseUnit = 1024; const unit_mapping = { @@ -92,13 +92,13 @@ function saveErrorAndResume(message) { async function uploadAndVerifyFiles(bucket) { current_size = 0; - let { data_multiplier } = unit_mapping.MB; + const { data_multiplier } = unit_mapping.MB; console.log('Writing and deleting data till size amount to grow ' + dataset_size + ' MB'); while (current_size < dataset_size) { try { console.log('Uploading files till data size grow to ' + dataset_size + ', current size is ' + current_size); - let file_size = set_fileSize(); - let file_name = 'file_part_' + file_size + (Math.floor(Date.now() / 1000)); + const file_size = set_fileSize(); + 
const file_name = 'file_part_' + file_size + (Math.floor(Date.now() / 1000)); files.push(file_name); current_size += file_size; console.log('Uploading file with size ' + file_size + ' MB'); @@ -155,7 +155,7 @@ async function reclaimCycle(agents_num) { } async function set_rpc_and_create_auth_token() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' diff --git a/src/test/qa/tests_report_summary.js b/src/test/qa/tests_report_summary.js index 4d3de11046..229d8cb35a 100644 --- a/src/test/qa/tests_report_summary.js +++ b/src/test/qa/tests_report_summary.js @@ -33,7 +33,7 @@ function debug(...msg) { function arg_to_date(arg) { if (!arg) return; - let moment_date = moment.utc(arg, DATE_FORMAT); + const moment_date = moment.utc(arg, DATE_FORMAT); if (!moment_date.isValid()) error(`Invalid date format: ${arg}`); return moment_date.toDate(); } diff --git a/src/test/scripts/ec_in_db.js b/src/test/scripts/ec_in_db.js index a62f258709..d66364191a 100644 --- a/src/test/scripts/ec_in_db.js +++ b/src/test/scripts/ec_in_db.js @@ -2,11 +2,11 @@ /* eslint-env mongo */ 'use strict'; -var system = db.systems.findOne(); -var replicas_chunk_config = db.chunk_configs.findOne({ 'chunk_coder_config.parity_frags': 0 }); -var ec_chunk_config = db.chunk_configs.findOne({ 'chunk_coder_config.parity_frags': { $gt: 0 } }); +const system = db.systems.findOne(); +const replicas_chunk_config = db.chunk_configs.findOne({ 'chunk_coder_config.parity_frags': 0 }); +const ec_chunk_config = db.chunk_configs.findOne({ 'chunk_coder_config.parity_frags': { $gt: 0 } }); -var chunk_config; +let chunk_config; if (system.default_chunk_config.toString() === ec_chunk_config._id.toString()) { chunk_config = replicas_chunk_config; } else { diff --git a/src/test/system_tests/ceph_s3_tests/test_ceph_s3.js b/src/test/system_tests/ceph_s3_tests/test_ceph_s3.js index 9fa5f88aa6..6ad2d86a3b 100644 --- a/src/test/system_tests/ceph_s3_tests/test_ceph_s3.js +++ b/src/test/system_tests/ceph_s3_tests/test_ceph_s3.js @@ -17,7 +17,7 @@ const argv = require('minimist')(process.argv.slice(2)); delete argv._; const { S3_CEPH_TEST_STEMS, S3_CEPH_TEST_SIGV4, CEPH_TEST, DEFAULT_NUMBER_OF_WORKERS } = require('./test_ceph_s3_constants.js'); -let testing_status = { +const testing_status = { pass: [], fail: [], skip: [], diff --git a/src/test/system_tests/sanity_build_test.js b/src/test/system_tests/sanity_build_test.js index 2b3ee76e9b..0f66207532 100644 --- a/src/test/system_tests/sanity_build_test.js +++ b/src/test/system_tests/sanity_build_test.js @@ -152,8 +152,8 @@ async function _create_resources_and_buckets() { //Create bucket with various RP & Placement console.info('Creating Buckets'); - let buck1 = await TEST_CTX.bucketfunc.createBucket(TEST_CTX.bucket_mirror); - let buck2 = await TEST_CTX.bucketfunc.createBucket(TEST_CTX.bucket_spread); + const buck1 = await TEST_CTX.bucketfunc.createBucket(TEST_CTX.bucket_mirror); + const buck2 = await TEST_CTX.bucketfunc.createBucket(TEST_CTX.bucket_spread); console.info('Updating Tier to EC & Mirror'); await TEST_CTX.bucketfunc.changeTierSetting(TEST_CTX.bucket_mirror, 4, 2); //EC 4+2 diff --git a/src/test/system_tests/test_bucket_access.js b/src/test/system_tests/test_bucket_access.js index eeb8ff2af0..6b769098c3 100644 --- a/src/test/system_tests/test_bucket_access.js +++ b/src/test/system_tests/test_bucket_access.js @@ -340,7 +340,7 @@ async function test_delete_bucket_deletes_permissions() { await server.createBucket({ Bucket: unique_bucket_name 
}).promise(); - let bucket = await client.bucket.read_bucket({ rpc_params: { name: unique_bucket_name } }); + const bucket = await client.bucket.read_bucket({ rpc_params: { name: unique_bucket_name } }); assert(bucket.owner_account.email.unwrap() === full_access_user.email, 'expecting full_access_user to have permissions to access ' + unique_bucket_name); await server.deleteBucket({ Bucket: unique_bucket_name }).promise(); diff --git a/src/test/system_tests/test_bucket_lambda_triggers.js b/src/test/system_tests/test_bucket_lambda_triggers.js index 1000ec1cae..0a817e61ed 100644 --- a/src/test/system_tests/test_bucket_lambda_triggers.js +++ b/src/test/system_tests/test_bucket_lambda_triggers.js @@ -37,11 +37,11 @@ const TIME_FOR_SDK_TO_UPDATE = 60000; const NUM_OF_RETRIES = 10; const POOL_NAME = 'test-pool'; -var client = rpc.new_client({ +const client = rpc.new_client({ address: `ws://${mgmt_ip}:${mgmt_port}` }); -let full_access_user = { +const full_access_user = { name: 'full_access', email: 'full_access@noobaa.com', password: 'master', @@ -50,7 +50,7 @@ let full_access_user = { default_resource: POOL_NAME, }; -let bucket1_user = { +const bucket1_user = { name: 'bucket1_access', email: 'bucket1_access@noobaa.com', password: 'onlyb1', @@ -122,7 +122,7 @@ const trigger_based_func_read = { /*** Utils ***/ async function authenticate() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -143,8 +143,8 @@ function prepare_func(fn) { } function get_new_server(user) { - let access_key = user.access_keys.access_key.unwrap(); - let secret_key = user.access_keys.secret_key.unwrap(); + const access_key = user.access_keys.access_key.unwrap(); + const secret_key = user.access_keys.secret_key.unwrap(); return new AWS.S3({ endpoint: `http://${s3_ip}:${s3_port}`, s3ForcePathStyle: true, @@ -155,8 +155,8 @@ function get_new_server(user) { } function get_new_lambda(user) { - let access_key = user.access_keys.access_key.unwrap(); - let secret_key = user.access_keys.secret_key.unwrap(); + const access_key = user.access_keys.access_key.unwrap(); + const secret_key = user.access_keys.secret_key.unwrap(); return new AWS.Lambda({ region: 'us-east-1', endpoint: `http://${s3_ip}:${s3_port}`, @@ -329,7 +329,7 @@ async function run_test() { } async function test_add_function(user, func) { - let lambda = get_new_lambda(user); + const lambda = get_new_lambda(user); await prepare_func(func); try { @@ -356,18 +356,18 @@ async function test_add_bucket_trigger(type, func, bucketname) { async function test_trigger_run_when_should(user, file_param, bucketname) { console.log(`test trigger run for ${bucketname}`); - let s3 = get_new_server(user); + const s3 = get_new_server(user); let file_not_created = true; let retries = 0; const fname = await ops.generate_random_file(1); - let params1 = { + const params1 = { Bucket: bucketname, Key: file_param, Body: fs.createReadStream(fname) }; await s3.upload(params1).promise(); while (retries < NUM_OF_RETRIES && file_not_created) { - let params2 = { + const params2 = { Bucket: bucketname, Key: file_param + '.json' }; @@ -391,10 +391,10 @@ async function test_trigger_run_when_should(user, file_param, bucketname) { async function test_trigger_dont_run_when_shouldnt(user, file_param, bucketname) { console.log(`test trigger should not run for ${bucketname}`); - let s3 = get_new_server(user); + const s3 = get_new_server(user); const fname = await ops.generate_random_file(1); - let params1 = { + const params1 = { Bucket: bucketname, 
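Note: the `params1`/`params2` conversions in these hunks are safe because `const` in JavaScript fixes the binding, not the value it points to. A minimal standalone sketch (names are illustrative, not from this repo):

const params = { Bucket: 'demo-bucket' }; // the binding is constant...
params.Key = 'demo.json';                 // ...but the object it names is still mutable
const keys = [];
keys.push('file_1');                      // a const array can still grow
// params = {};                           // TypeError: Assignment to constant variable.
console.log(params, keys);

This is why `prefer-const` can convert request-parameter objects and accumulator arrays alike: the bindings are never reassigned, even though the values they hold change.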
Key: file_param, Body: fs.createReadStream(fname) @@ -402,7 +402,7 @@ async function test_trigger_dont_run_when_shouldnt(user, file_param, bucketname) await s3.upload(params1).promise(); await P.delay(TIME_FOR_FUNC_TO_RUN); - let params2 = { + const params2 = { Bucket: bucketname, Key: file_param + '.json' }; @@ -418,12 +418,12 @@ async function test_trigger_dont_run_when_shouldnt(user, file_param, bucketname) async function test_delete_trigger_run(user, file_param, bucketname, multiple) { console.log(`test delete trigger run for ${bucketname}`); - let s3 = get_new_server(user); - let params = { + const s3 = get_new_server(user); + const params = { Bucket: bucketname, Key: file_param }; - let params2 = { + const params2 = { Bucket: bucketname, Key: file_param + '.json' }; @@ -466,7 +466,7 @@ async function test_delete_trigger_run(user, file_param, bucketname, multiple) { async function test_trigger_run_when_should_multi(user, bucketname, files_prefix, suffix, num_of_files) { console.log(`test multi delete trigger should run for ${bucketname}`); - let s3 = get_new_server(user); + const s3 = get_new_server(user); const names = []; for (let i = 0; i < num_of_files; ++i) { names.push(files_prefix + '_no_' + i + suffix); @@ -474,7 +474,7 @@ async function test_trigger_run_when_should_multi(user, bucketname, files_prefix const fname = await ops.generate_random_file(1); await P.map(names, name => { - let params1 = { + const params1 = { Bucket: bucketname, Key: name, Body: fs.createReadStream(fname) @@ -483,7 +483,7 @@ async function test_trigger_run_when_should_multi(user, bucketname, files_prefix }); await P.delay(TIME_FOR_FUNC_TO_RUN * 2); // wait for the functions to run... - let params2 = { + const params2 = { Bucket: bucketname, Prefix: files_prefix }; diff --git a/src/test/system_tests/test_bucket_placement.js b/src/test/system_tests/test_bucket_placement.js index cc7d41f908..5ca722f083 100644 --- a/src/test/system_tests/test_bucket_placement.js +++ b/src/test/system_tests/test_bucket_placement.js @@ -8,13 +8,13 @@ if (argv.log_file) { } dbg.set_process_name('test_bucket_placement'); -var basic_server_ops = require('../utils/basic_server_ops'); -var P = require('../../util/promise'); -var api = require('../../api'); -var _ = require('lodash'); +const basic_server_ops = require('../utils/basic_server_ops'); +const P = require('../../util/promise'); +const api = require('../../api'); +const _ = require('lodash'); const test_utils = require('./test_utils'); -var dotenv = require('../../util/dotenv'); +const dotenv = require('../../util/dotenv'); dotenv.load(); @@ -26,8 +26,8 @@ const { argv.access_key = argv.access_key || '123'; argv.secret_key = argv.secret_key || 'abc'; -var rpc = api.new_rpc(); -var client = rpc.new_client({ +const rpc = api.new_rpc(); +const client = rpc.new_client({ address: 'ws://' + mgmt_ip + ':' + mgmt_port }); @@ -40,7 +40,7 @@ module.exports = { // Does the Auth and returns the nodes in the system async function create_auth() { - var auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -130,8 +130,8 @@ async function perform_placement_tests() { key: fkey, }); _.each(chunks, chunk => { - var pool1_count = 0; - var pool2_count = 0; + let pool1_count = 0; + let pool2_count = 0; _.each(chunk.frags, frag => { _.each(frag.blocks, block => { if (block.adminfo.pool_name === 'pool1') { diff --git a/src/test/system_tests/test_build_chunks.js b/src/test/system_tests/test_build_chunks.js index daaa42f220..a1897bf042 100644 --- 
a/src/test/system_tests/test_build_chunks.js +++ b/src/test/system_tests/test_build_chunks.js @@ -32,7 +32,7 @@ const { } = argv; -let TEST_CTX = { +const TEST_CTX = { ip: 'localhost', s3_endpoint: `http://${s3_ip}:${s3_port}/`, default_bucket: 'first.bucket', @@ -44,11 +44,11 @@ let TEST_CTX = { accounts_default_resource: 'accounts_default_resource' }; -let rpc = api.new_rpc(); //'ws://' + argv.ip + ':8080'); -let client = rpc.new_client({ +const rpc = api.new_rpc(); //'ws://' + argv.ip + ':8080'); +const client = rpc.new_client({ address: `ws://${mgmt_ip}:${mgmt_port}` }); -let n2n_agent = rpc.register_n2n_agent((...args) => client.node.n2n_signal(...args)); +const n2n_agent = rpc.register_n2n_agent((...args) => client.node.n2n_signal(...args)); n2n_agent.set_any_rpc_address(); /////// Aux Functions //////// @@ -192,7 +192,7 @@ async function verify_object_health( test_corruption ) { console.log(`verifying object ${filename} health. expected num of blocks: ${expected_num_blocks}`); - let start_ts = Date.now(); + const start_ts = Date.now(); let obj_is_valid = false; let obj_is_verified = !test_corruption; diff --git a/src/test/system_tests/test_cloud_pools.js b/src/test/system_tests/test_cloud_pools.js index befafb4dad..19680cc515 100644 --- a/src/test/system_tests/test_cloud_pools.js +++ b/src/test/system_tests/test_cloud_pools.js @@ -1,17 +1,17 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var api = require('../../api'); -var rpc = api.new_rpc(); -var util = require('util'); -var _ = require('lodash'); -var AWS = require('aws-sdk'); -var argv = require('minimist')(process.argv); -var P = require('../../util/promise'); -var basic_server_ops = require('../utils/basic_server_ops'); -var dotenv = require('../../util/dotenv'); +const api = require('../../api'); +const rpc = api.new_rpc(); +const util = require('util'); +const _ = require('lodash'); +const AWS = require('aws-sdk'); +const argv = require('minimist')(process.argv); +const P = require('../../util/promise'); +const basic_server_ops = require('../utils/basic_server_ops'); +const dotenv = require('../../util/dotenv'); dotenv.load(); -var test_utils = require('./test_utils'); +const test_utils = require('./test_utils'); const s3 = new AWS.S3({ // endpoint: 'https://s3.amazonaws.com', @@ -25,7 +25,7 @@ const s3 = new AWS.S3({ // region: 'eu-central-1' }); -let TEST_CTX = { +const TEST_CTX = { source_ip: 'localhost', source_bucket: 'first.bucket', target_port: process.env.PORT || '5001', @@ -34,10 +34,10 @@ let TEST_CTX = { cloud_pool_name: 'majesticsloth', }; -var file_sizes = [1]; -var file_names = ['нуба_1', 'нуба_2', 'нуба_3']; +const file_sizes = [1]; +const file_names = ['нуба_1', 'нуба_2', 'нуба_3']; -var client = rpc.new_client({ +const client = rpc.new_client({ address: 'ws://' + TEST_CTX.source_ip + ':' + TEST_CTX.target_port }); @@ -56,7 +56,7 @@ function init_s3() { return; } - let object_keys = _.map(objects_to_delete, obj => ({ + const object_keys = _.map(objects_to_delete, obj => ({ Key: obj.Key })); @@ -85,7 +85,7 @@ function init_s3() { function list_all_s3_objects(bucket_name) { // Initialization of IsTruncated in order to perform the first while cycle - var listObjectsResponse = { + const listObjectsResponse = { is_truncated: true, objects: [], common_prefixes: [], @@ -104,14 +104,14 @@ function list_all_s3_objects(bucket_name) { }) .then(function(res) { listObjectsResponse.is_truncated = res.is_truncated; - let res_list = { + const res_list = { objects: res.Contents, common_prefixes: res.common_prefixes }; if 
(res_list.objects.length) { listObjectsResponse.objects = _.concat(listObjectsResponse.objects, res_list.objects); } - let last_obj = _.last(listObjectsResponse.objects); + const last_obj = _.last(listObjectsResponse.objects); listObjectsResponse.key_marker = last_obj && last_obj.key; }) .catch(function(err) { @@ -147,7 +147,7 @@ function put_object(s3_obj, bucket, key) { function authenticate() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -161,7 +161,7 @@ function authenticate() { function verify_object_parts_on_cloud_nodes(replicas_in_tier, bucket_name, object_key, cloud_pool) { // TODO: Currently set high because there is a problem with cloud resource test block write // That blocks the whole replication process - let abort_timeout_sec = 10 * 60; + const abort_timeout_sec = 10 * 60; let first_iteration = true; let blocks_correct = false; let start_ts; @@ -178,7 +178,7 @@ function verify_object_parts_on_cloud_nodes(replicas_in_tier, bucket_name, objec key: object_key, }) .then(function(obj_mapping_arg) { - let blocks_by_cloud_pool_name = { + const blocks_by_cloud_pool_name = { blocks: [] }; _.forEach(obj_mapping_arg.parts, part => { @@ -202,7 +202,7 @@ function verify_object_parts_on_cloud_nodes(replicas_in_tier, bucket_name, objec first_iteration = false; } - let diff = Date.now() - start_ts; + const diff = Date.now() - start_ts; if (diff > abort_timeout_sec * 1000) { throw new Error('aborted verify_object_parts_on_cloud_nodes after ' + abort_timeout_sec + ' seconds'); } @@ -237,14 +237,14 @@ function run_test() { name: TEST_CTX.source_bucket })) .then(function(source_bucket) { - let tier_name = source_bucket.tiering.tiers[0].tier; + const tier_name = source_bucket.tiering.tiers[0].tier; return client.tier.read_tier({ name: tier_name }) .then(function(tier) { replicas_in_tier = tier.chunk_coder_config.replicas; files_bucket_tier = tier; - let new_pools = tier.attached_pools.concat(TEST_CTX.cloud_pool_name); + const new_pools = tier.attached_pools.concat(TEST_CTX.cloud_pool_name); return client.tier.update_tier({ name: tier.name, attached_pools: new_pools, @@ -340,7 +340,7 @@ function run_test() { .then(() => block_ids); }) .then(function(block_ids) { - let new_pools = _.filter(files_bucket_tier.attached_pools, pool => String(pool) !== TEST_CTX.cloud_pool_name); + const new_pools = _.filter(files_bucket_tier.attached_pools, pool => String(pool) !== TEST_CTX.cloud_pool_name); // This is used in order to make sure that the blocks will be deleted from the cloud return client.tier.update_tier({ name: files_bucket_tier.name, diff --git a/src/test/system_tests/test_files_ul.js b/src/test/system_tests/test_files_ul.js index 0bbcb4e4a7..4e3ceabd20 100644 --- a/src/test/system_tests/test_files_ul.js +++ b/src/test/system_tests/test_files_ul.js @@ -44,14 +44,14 @@ function show_usage() { } function pre_generation() { - var dirs = Math.ceil(UL_TEST.num_files / UL_TEST.files_per_dir); + const dirs = Math.ceil(UL_TEST.num_files / UL_TEST.files_per_dir); console.log('Creating directory structure'); return os_utils.exec('mkdir -p ' + UL_TEST.base_dir) .then(function() { return os_utils.exec('rm -rf ' + UL_TEST.base_dir + '/*'); }) .then(function() { - var i = 0; + let i = 0; return P.pwhile( function() { return i < dirs; @@ -67,16 +67,16 @@ function pre_generation() { }) .then(function() { console.log('Generating files (this might take some time) ...'); - var d = 0; + let d = 0; return P.pwhile( function() { return d < dirs; }, 
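Note: the `d` counter here becomes `let` rather than `const` because it is incremented inside the loop callback just below; `no-var` exists mainly to eliminate `var`'s hoisting and function scoping, which `let`/`const` replace with block scoping. A short sketch of that difference, with hypothetical names:

function scope_demo() {
    console.log(hoisted);       // undefined: var declarations are hoisted to the function top
    var hoisted = 1;
    if (hoisted === 1) {
        var leaky = 2;          // function-scoped: still visible after the block
        const contained = 3;    // block-scoped: gone after the closing brace
        console.log(contained); // 3
    }
    console.log(leaky);         // 2
    // console.log(contained);  // ReferenceError: contained is not defined
}
scope_demo();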
function() { d += 1; - var files = (d === dirs) ? UL_TEST.num_files % UL_TEST.files_per_dir : UL_TEST.files_per_dir; + const files = (d === dirs) ? UL_TEST.num_files % UL_TEST.files_per_dir : UL_TEST.files_per_dir; console.log(' generating batch', d, 'of', files, 'files'); - for (var i = 1; i <= files; ++i) { + for (let i = 1; i <= files; ++i) { UL_TEST.files.push(UL_TEST.base_dir + '/dir' + d + '/file_' + i); } return os_utils.exec('for i in `seq 1 ' + files + '` ; do' + @@ -97,7 +97,7 @@ function upload_test() { Bucket: UL_TEST.bucket_name }); - var upload_semaphore = new Semaphore(UL_TEST.num_threads); + const upload_semaphore = new Semaphore(UL_TEST.num_threads); return P.all(_.map(UL_TEST.files, function(f) { return upload_semaphore.surround(function() { return upload_file(f); @@ -106,15 +106,15 @@ function upload_test() { } function upload_file(test_file) { - var start_ts; + let start_ts; console.log('Called upload_file with param', test_file); return P.fcall(function() { - var s3bucket = new AWS.S3({ + const s3bucket = new AWS.S3({ endpoint: UL_TEST.target, s3ForcePathStyle: true, sslEnabled: false, }); - var params = { + const params = { Bucket: UL_TEST.bucket_name, Key: test_file, Body: fs.createReadStream(test_file), @@ -157,7 +157,7 @@ function print_summary() { //} console.log('Test results, breakdown per each 1K uploads:'); - var i = 0; + let i = 0; _.each(UL_TEST.measurement.mid, function(m) { console.log(' for files', (i * 1000) + 1, 'to', (i + 1) * 1000, 'avg ul time', m); i += 1; @@ -167,7 +167,7 @@ function print_summary() { } function main() { - var missing_params = false; + let missing_params = false; //Verify Input Parameters if (_.isUndefined(argv.ip)) { diff --git a/src/test/system_tests/test_md_aggregator.js b/src/test/system_tests/test_md_aggregator.js index 9737965320..3abbfe9907 100644 --- a/src/test/system_tests/test_md_aggregator.js +++ b/src/test/system_tests/test_md_aggregator.js @@ -21,15 +21,15 @@ const SERVICES_WAIT_IN_SECONDS = 30; argv.ip = argv.ip || 'localhost'; argv.access_key = argv.access_key || '123'; argv.secret_key = argv.secret_key || 'abc'; -var rpc = api.new_rpc(); -var client = rpc.new_client({ +const rpc = api.new_rpc(); +const client = rpc.new_client({ address: 'ws://' + argv.ip + ':' + process.env.PORT }); // Does the Auth and returns the nodes in the system function create_auth() { - var auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -123,7 +123,7 @@ async function prepare_buckets_with_objects() { function calculate_expected_storage_stats_for_buckets(buckets_array, storage_read_by_bucket) { console.log('calculate_expected_storage_stats_for_buckets started'); return P.map_one_by_one(buckets_array, bucket => { - let current_bucket_storage = { + const current_bucket_storage = { chunks_capacity: 0, objects_size: 0, blocks_size: 0 @@ -171,7 +171,7 @@ function run_test() { .then(() => P.delay(5 * 60 * 1000)) .then(() => client.system.read_system({})) .then(sys_res => { - let storage_by_bucket = {}; + const storage_by_bucket = {}; sys_res.buckets.forEach(bucket => { if (String(bucket.name.unwrap()) !== 'first.bucket') { @@ -223,9 +223,9 @@ function wait_for_s3_and_web(max_seconds_to_wait) { } function wait_for_mongodb_to_start(max_seconds_to_wait) { - var isNotListening = true; - var MAX_RETRIES = max_seconds_to_wait; - var wait_counter = 1; + let isNotListening = true; + const MAX_RETRIES = max_seconds_to_wait; + let wait_counter = 1; //wait up to 10 seconds console.log('waiting for 
mongodb to start (1)'); @@ -262,9 +262,9 @@ function wait_for_mongodb_to_start(max_seconds_to_wait) { } function wait_for_server_to_start(max_seconds_to_wait, port) { - var isNotListening = true; - var MAX_RETRIES = max_seconds_to_wait; - var wait_counter = 1; + let isNotListening = true; + const MAX_RETRIES = max_seconds_to_wait; + let wait_counter = 1; //wait up to 10 seconds console.log('waiting for server to start (1)'); diff --git a/src/test/system_tests/test_node_failure.js b/src/test/system_tests/test_node_failure.js index c082d32e3f..2c8a45a0a6 100644 --- a/src/test/system_tests/test_node_failure.js +++ b/src/test/system_tests/test_node_failure.js @@ -8,16 +8,16 @@ if (argv.log_file) { } dbg.set_process_name('test_node_failure'); -let _ = require('lodash'); -let P = require('../../util/promise'); -let api = require('../../api'); -let ops = require('../utils/basic_server_ops'); -var dotenv = require('../../util/dotenv'); +const _ = require('lodash'); +const P = require('../../util/promise'); +const api = require('../../api'); +const ops = require('../utils/basic_server_ops'); +const dotenv = require('../../util/dotenv'); const { v4: uuid } = require('uuid'); dotenv.load(); -let suffix = uuid().split('-')[0]; +const suffix = uuid().split('-')[0]; const { mgmt_ip = 'localhost', @@ -27,7 +27,7 @@ const { -let TEST_CTX = { +const TEST_CTX = { num_of_agents: 10, bucket: 'test-bucket-' + suffix, pool: 'test-pool-' + suffix, @@ -38,8 +38,8 @@ let TEST_CTX = { }; -let rpc = api.new_rpc_from_base_address(`ws://${mgmt_ip}:${mgmt_port}`, 'INTERNAL'); //'ws://' + argv.ip + ':8080'); -let client = rpc.new_client(); +const rpc = api.new_rpc_from_base_address(`ws://${mgmt_ip}:${mgmt_port}`, 'INTERNAL'); //'ws://' + argv.ip + ':8080'); +const client = rpc.new_client(); module.exports = { run_test: run_test @@ -48,7 +48,7 @@ module.exports = { /////// Aux Functions //////// function authenticate() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -86,7 +86,7 @@ async function remove_agents() { function _list_nodes(retries) { - let query = { + const query = { filter: TEST_CTX.nodes_name, skip_mongo_nodes: true }; @@ -98,9 +98,9 @@ function _list_nodes(retries) { throw new Error('list nodes failed'); } if (reply.total_count < TEST_CTX.num_of_agents || reply.filter_counts.by_mode.INITIALIZING) { - let msg = `list nodes returned ${reply.total_count} nodes and ${reply.filter_counts.by_mode.INITIALIZING} initializing. ` + + const msg = `list nodes returned ${reply.total_count} nodes and ${reply.filter_counts.by_mode.INITIALIZING} initializing. 
` + `expected (${TEST_CTX.num_of_agents}) nodes.`; - let total_tries = retries || 1; + const total_tries = retries || 1; if (total_tries > TEST_CTX.max_init_retries) { console.error(msg + `aborting after ${TEST_CTX.max_init_retries} retries`); throw new Error(msg + `aborting after ${TEST_CTX.max_init_retries} retries`); @@ -116,7 +116,7 @@ function _list_nodes(retries) { function create_test_pool() { return _list_nodes() .then(reply => { - let nodes = reply.nodes.map(node => ({ + const nodes = reply.nodes.map(node => ({ name: node.name })); TEST_CTX.nodes = nodes; @@ -213,7 +213,7 @@ function validate_mappings() { function test_node_fail_replicate() { // kill first node in the nodes array, and then test it's blocks - let node = _.keys(TEST_CTX.chunks_by_nodes)[0]; + const node = _.keys(TEST_CTX.chunks_by_nodes)[0]; return client.hosted_agents.remove_agent({ name: node }) diff --git a/src/test/system_tests/test_s3_authentication.js b/src/test/system_tests/test_s3_authentication.js index 55a2ffa8b5..2f9b94cfed 100644 --- a/src/test/system_tests/test_s3_authentication.js +++ b/src/test/system_tests/test_s3_authentication.js @@ -1,20 +1,20 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var AWS = require('aws-sdk'); -var api = require('../../api'); -var rpc = api.new_rpc(); -var argv = require('minimist')(process.argv); -var P = require('../../util/promise'); -var basic_server_ops = require('../utils/basic_server_ops'); +const AWS = require('aws-sdk'); +const api = require('../../api'); +const rpc = api.new_rpc(); +const argv = require('minimist')(process.argv); +const P = require('../../util/promise'); +const basic_server_ops = require('../utils/basic_server_ops'); // var _ = require('lodash'); // var assert = require('assert'); // var promise_utils = require('../../util/promise_utils'); -var dotenv = require('../../util/dotenv'); -var http = require('http'); +const dotenv = require('../../util/dotenv'); +const http = require('http'); dotenv.load(); -let TEST_PARAMS = { +const TEST_PARAMS = { ip: argv.ip || 'localhost', bucket: argv.bucket || 'first.bucket', port: argv.target_port || process.env.PORT, @@ -22,7 +22,7 @@ let TEST_PARAMS = { secret_key: argv.secret_key || 'abc', }; -var client = rpc.new_client({ +const client = rpc.new_client({ address: 'ws://localhost:' + process.env.PORT }); @@ -31,7 +31,7 @@ module.exports = { }; function authenticate() { - let auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -43,7 +43,7 @@ function authenticate() { function test_s3_connection() { return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -88,7 +88,7 @@ function list_buckets() { function getSignedUrl(bucket, obj, expiry) { console.log('GENERATE SIGNED_URL OBJECT: ', obj, ' FROM BUCKET: ', bucket); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -128,7 +128,7 @@ function httpGetAsPromise(url) { function create_bucket(name) { console.log('CREATE BUCKET: ', name); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -153,7 +153,7 @@ function create_bucket(name) { function create_folder(bucket, folder) { console.log('CREATE FOLDER: ', folder, ' IN 
BUCKET: ', bucket); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -179,7 +179,7 @@ function create_folder(bucket, folder) { function head_object(bucket, key) { console.log('HEAD OBJECT: ', key, ' FROM BUCKET: ', bucket); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -205,7 +205,7 @@ function head_object(bucket, key) { function get_object(bucket, key) { console.log('GET OBJECT: ', key, ' FROM BUCKET: ', bucket); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -231,7 +231,7 @@ function get_object(bucket, key) { function delete_object(bucket, key) { console.log('DELETE OBJECT: ', key, ' FROM BUCKET: ', bucket); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -258,7 +258,7 @@ function delete_object(bucket, key) { function delete_bucket(name) { console.log('DELETE BUCKET: ', name); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -283,7 +283,7 @@ function delete_bucket(name) { function delete_folder(bucket, folder) { console.log('DELETE FOLDER: ', folder, ' FROM BUCKET: ', bucket); return P.fcall(function() { - var s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: TEST_PARAMS.ip, accessKeyId: TEST_PARAMS.access_key, secretAccessKey: TEST_PARAMS.secret_key, @@ -317,8 +317,8 @@ function main() { } function run_test() { - let file_sizes = [1, 2, 3]; - let file_names = ['c3_нуба_1', 'c3_нуба_2', 'c3_нуба_3']; + const file_sizes = [1, 2, 3]; + const file_names = ['c3_нуба_1', 'c3_нуба_2', 'c3_нуба_3']; let fkey; let signed_url; return authenticate().then(() => test_s3_connection()) diff --git a/src/test/system_tests/test_utils.js b/src/test/system_tests/test_utils.js index ca76692075..7e14cb4943 100644 --- a/src/test/system_tests/test_utils.js +++ b/src/test/system_tests/test_utils.js @@ -1,8 +1,8 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var P = require('../../util/promise'); -var _ = require('lodash'); +const P = require('../../util/promise'); +const _ = require('lodash'); /** * @@ -14,11 +14,11 @@ var _ = require('lodash'); */ function blocks_exist_on_cloud(need_to_exist, pool_id, bucket_name, blocks, s3) { console.log('blocks_exist_on_cloud::', need_to_exist, pool_id, bucket_name); - var isDone = true; + let isDone = true; // Time in seconds to wait, notice that it will only check once a second. // This is done in order to lower the amount of checking requests. 
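Note: this helper shows the split that `prefer-const` draws: `isDone` and `wait_counter` are written again later, so they stay `let`, while `MAX_RETRIES` just below is never reassigned and becomes `const`. The same decision in a compact, self-contained form (illustrative names only):

let attempts = 0;          // reassigned below, so it must remain let
let done = false;          // flipped later, so let as well
const MAX_ATTEMPTS = 3;    // never reassigned: prefer-const requires const
while (!done && attempts < MAX_ATTEMPTS) {
    attempts += 1;                        // the reassignment that keeps attempts as let
    done = attempts === MAX_ATTEMPTS;
}
console.log(attempts, done);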
- var MAX_RETRIES = 10 * 60; - var wait_counter = 1; + const MAX_RETRIES = 10 * 60; + let wait_counter = 1; return P.pwhile( () => isDone, diff --git a/src/test/system_tests/upgradeonly.js b/src/test/system_tests/upgradeonly.js index 6de57c656b..60440a505d 100644 --- a/src/test/system_tests/upgradeonly.js +++ b/src/test/system_tests/upgradeonly.js @@ -1,10 +1,10 @@ /* Copyright (C) 2016 NooBaa */ "use strict"; -var _ = require('lodash'); -var P = require('../../util/promise'); -var ops = require('../utils/basic_server_ops'); -var argv = require('minimist')(process.argv); +const _ = require('lodash'); +const P = require('../../util/promise'); +const ops = require('../utils/basic_server_ops'); +const argv = require('minimist')(process.argv); function show_usage() { console.error('usage: node upgradeonly.js <--upgrade_pack path_to_upgrade_pack> <--target_ip ip>'); diff --git a/src/test/unit_tests/coretest.js b/src/test/unit_tests/coretest.js index 15e7f9b8b2..5662e1c051 100644 --- a/src/test/unit_tests/coretest.js +++ b/src/test/unit_tests/coretest.js @@ -233,7 +233,7 @@ function setup(options = {}) { if (_incomplete_rpc_coverage) { let had_missing = false; - for (let srv of api_coverage) { + for (const srv of api_coverage) { console.warn('API was not covered:', srv); had_missing = true; } diff --git a/src/test/unit_tests/test_agent_blocks_reclaimer.js b/src/test/unit_tests/test_agent_blocks_reclaimer.js index 85a60d0b07..b720d933d8 100644 --- a/src/test/unit_tests/test_agent_blocks_reclaimer.js +++ b/src/test/unit_tests/test_agent_blocks_reclaimer.js @@ -101,7 +101,7 @@ class ReclaimerMock extends AgentBlocksReclaimer { return P.resolve() .then(() => { blocks.forEach(block_rec => { - let block = this.blocks.find(mock_block => String(mock_block._id) === String(block_rec._id)); + const block = this.blocks.find(mock_block => String(mock_block._id) === String(block_rec._id)); // This allows us to mock failure of deletes if (block && !block.fail_to_delete) { block.reclaimed = new Date(); diff --git a/src/test/unit_tests/test_bucket_replication.js b/src/test/unit_tests/test_bucket_replication.js index abf0aba97b..ab89459053 100644 --- a/src/test/unit_tests/test_bucket_replication.js +++ b/src/test/unit_tests/test_bucket_replication.js @@ -247,7 +247,7 @@ mocha.describe('replication configuration bg worker tests', function() { //const namespace_buckets = []; let s3_owner; let scanner; - let s3_creds = { + const s3_creds = { s3ForcePathStyle: true, signatureVersion: 'v4', computeChecksums: true, @@ -395,8 +395,8 @@ mocha.describe('replication configuration bg worker tests', function() { }); mocha.it('run replication scanner and wait - no prefix - all objects should be uploaded', async function() { - let contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5); - for (let content of contents) { + const contents = await list_objects_and_wait(s3_owner, bucket_for_replications, 5); + for (const content of contents) { const key = content.Key; await s3_owner.deleteObject({ Bucket: bucket_for_replications, Key: key }).promise(); } @@ -459,7 +459,7 @@ mocha.describe('replication pagination tests', function() { //const namespace_buckets = []; let s3_owner; let scanner; - let s3_creds = { + const s3_creds = { s3ForcePathStyle: true, signatureVersion: 'v4', computeChecksums: true, @@ -485,7 +485,7 @@ mocha.describe('replication pagination tests', function() { // populate bucket2 for (let i = 0; i < 5; i++) { - let key = create_random_body(); + const key = create_random_body(); 
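Note: `key` above can be `const` even though it receives a new value on every pass, because each loop iteration creates a fresh block-scoped binding; only `i`, which `i++` reassigns, has to remain `let`. With `for...of` the loop variable itself can be `const`. A short illustrative sketch:

for (let i = 0; i < 3; i++) {           // i is reassigned by i++, so it stays let
    const key = `key_${i}`;             // a fresh const binding on each iteration
    console.log(key);
}
for (const name of ['a', 'b', 'c']) {   // for...of rebinds name per element
    console.log(name);
}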
bucket2_keys.push(key); await put_object(s3_owner, bucket2, key); } @@ -539,8 +539,8 @@ mocha.describe('replication pagination tests', function() { }); mocha.it('list_buckets_and_compare - 1 ', async function() { - let src_keys = ['a1', 'a2', 'b1', 'b2', 'b3', 'b4']; - let dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'b1', 'b2', 'b4']; + const src_keys = ['a1', 'a2', 'b1', 'b2', 'b3', 'b4']; + const dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'b1', 'b2', 'b4']; bucket1_keys = src_keys; bucket2_keys = dst_keys; for (let i = 0; i < src_keys.length; i++) { @@ -569,8 +569,8 @@ mocha.describe('replication pagination tests', function() { }); mocha.it('list_buckets_and_compare - 2 ', async function() { - let src_keys = ['b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8']; - let dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'b6', 'b7']; + const src_keys = ['b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8']; + const dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'b6', 'b7']; bucket1_keys = src_keys; bucket2_keys = dst_keys; for (let i = 0; i < src_keys.length; i++) { @@ -599,8 +599,8 @@ mocha.describe('replication pagination tests', function() { }); mocha.it('list_buckets_and_compare - 3 ', async function() { - let src_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'z1', 'z2', 'z3', 'z4']; - let dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'c1', 'c2', 'c3', 'c4']; + const src_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'z1', 'z2', 'z3', 'z4']; + const dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'c1', 'c2', 'c3', 'c4']; for (let i = 0; i < src_keys.length; i++) { await put_object(s3_owner, bucket1, src_keys[i], src_keys[i]); } @@ -630,8 +630,8 @@ mocha.describe('replication pagination tests', function() { }); mocha.it('list_buckets_and_compare - 4 ', async function() { - let src_keys = ['a1', 'a2', 'a3', 'a4', 'a5']; - let dst_keys = ['a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14']; + const src_keys = ['a1', 'a2', 'a3', 'a4', 'a5']; + const dst_keys = ['a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14']; for (let i = 0; i < src_keys.length; i++) { await put_object(s3_owner, bucket1, src_keys[i], src_keys[i]); } @@ -655,11 +655,11 @@ mocha.describe('replication pagination tests', function() { mocha.it('list_buckets_and_compare - 5 ', async function() { - let src_keys = ['a1', 'a2', 'a3', 'a4', 'a5', + const src_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'c1', 'c2', 'c3', 'c4', 'c5', 'd1', 'd2', 'd3', 'd4', 'd5' ]; - let dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', + const dst_keys = ['a1', 'a2', 'a3', 'a4', 'a5', 'b1', 'b2', 'b3', 'b4', 'b5', 'c1', 'c2', 'c3', 'c4', 'c5', 'd1', 'd2', 'd3', 'd4', 'd5' @@ -703,8 +703,8 @@ mocha.describe('replication pagination tests', function() { }); mocha.it('list_buckets_and_compare - 6 ', async function() { - let src_keys = ['a1', 'a2', 'a3', 'a4', 'a5']; - let dst_keys = ['b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8']; + const src_keys = ['a1', 'a2', 'a3', 'a4', 'a5']; + const dst_keys = ['b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8']; bucket1_keys = src_keys; bucket2_keys = dst_keys; for (let i = 0; i < src_keys.length; i++) { @@ -810,7 +810,7 @@ async function list_all_objs_in_bucket(s3owner, bucket, prefix) { let marker; const elements = []; while (isTruncated) { - let params = { Bucket: bucket }; + const params = { Bucket: bucket }; if (prefix) params.Prefix = prefix; if (marker) params.Marker = marker; const response = await s3owner.listObjects(params).promise(); @@ -849,7 +849,7 @@ mocha.describe('Replication pagination test', function() { const 
buckets = [src_bucket, target_bucket]; let s3_owner; let scanner; - let s3_creds = { + const s3_creds = { s3ForcePathStyle: true, signatureVersion: 'v4', computeChecksums: true, @@ -857,8 +857,8 @@ mocha.describe('Replication pagination test', function() { region: 'us-east-1', httpOptions: { agent: new http.Agent({ keepAlive: false }) }, }; - let src_bucket_keys = []; - let target_bucket_keys = []; + const src_bucket_keys = []; + const target_bucket_keys = []; mocha.before('init scanner & populate buckets', async function() { process.env.REPLICATION_MAX_KEYS = "6"; // create buckets @@ -878,7 +878,7 @@ mocha.describe('Replication pagination test', function() { // populate source bucket for (let i = 0; i < obj_amount; i++) { - let key = create_random_body(); + const key = create_random_body(); src_bucket_keys.push(key); await put_object(s3_owner, src_bucket, key); } diff --git a/src/test/unit_tests/test_bucketspace.js b/src/test/unit_tests/test_bucketspace.js index 84c3ac08ed..7f9d8a8109 100644 --- a/src/test/unit_tests/test_bucketspace.js +++ b/src/test/unit_tests/test_bucketspace.js @@ -23,7 +23,7 @@ const MAC_PLATFORM = 'darwin'; const inspect = (x, max_arr = 5) => util.inspect(x, { colors: true, depth: null, maxArrayLength: max_arr }); -let new_account_params = { +const new_account_params = { has_login: false, s3_access: true, }; @@ -49,7 +49,7 @@ mocha.describe('bucket operations - namespace_fs', function() { let s3_correct_uid; let s3_correct_uid_default_nsr; - let s3_creds = { + const s3_creds = { s3ForcePathStyle: true, signatureVersion: 'v4', computeChecksums: true, @@ -574,10 +574,10 @@ mocha.describe('bucket operations - namespace_fs', function() { await fs_utils.file_must_exist(path.join(tmp_fs_root, '/new_s3_buckets_dir')); }); mocha.it('delete account by uid, gid', async function() { - let read_account_resp1 = await rpc_client.account.read_account({ email: 'account_wrong_uid0@noobaa.com' }); + const read_account_resp1 = await rpc_client.account.read_account({ email: 'account_wrong_uid0@noobaa.com' }); assert.ok(read_account_resp1); // create another account with the same uid gid - let account_wrong_uid1 = await rpc_client.account.create_account({ + const account_wrong_uid1 = await rpc_client.account.create_account({ ...new_account_params, email: 'account_wrong_uid1@noobaa.com', name: 'account_wrong_uid1', @@ -599,17 +599,17 @@ mocha.describe('bucket operations - namespace_fs', function() { // check that both accounts deleted for (let i = 0; i < 2; i++) { try { - let deleted_account_exist = await rpc_client.account.read_account({ email: `account_wrong_uid${i}@noobaa.com` }); + const deleted_account_exist = await rpc_client.account.read_account({ email: `account_wrong_uid${i}@noobaa.com` }); assert.fail(`found account: ${deleted_account_exist} - account should be deleted`); } catch (err) { assert.ok(err.rpc_code === 'NO_SUCH_ACCOUNT'); } } - let list_account_resp2 = (await rpc_client.account.list_accounts({})).accounts; + const list_account_resp2 = (await rpc_client.account.list_accounts({})).accounts; assert.ok(list_account_resp2.length > 0); }); mocha.it('delete account by uid, gid - no such account', async function() { - let list_account_resp1 = (await rpc_client.account.list_accounts({})).accounts; + const list_account_resp1 = (await rpc_client.account.list_accounts({})).accounts; assert.ok(list_account_resp1.length > 0); try { await rpc_client.account.delete_account_by_property({ nsfs_account_config: { uid: 26041993, gid: 26041993 } }); @@ -641,8 +641,8 @@ function 
create_random_body() { function bucket_in_list(exist_buckets, not_exist_buckets, s3_buckets_list_response) { const bucket_names = s3_buckets_list_response.map(bucket => bucket.Name); - let exist_checker = exist_buckets.every(v => bucket_names.includes(v)); - let doesnt_exist_checker = not_exist_buckets.every(v => !bucket_names.includes(v)); + const exist_checker = exist_buckets.every(v => bucket_names.includes(v)); + const doesnt_exist_checker = not_exist_buckets.every(v => !bucket_names.includes(v)); return exist_checker && doesnt_exist_checker; } @@ -682,7 +682,7 @@ mocha.describe('list objects - namespace_fs', function() { let s3_uid26041993; let s3_uid6; - let s3_creds = { + const s3_creds = { s3ForcePathStyle: true, signatureVersion: 'v4', computeChecksums: true, @@ -911,7 +911,7 @@ mocha.describe('nsfs account configurations', function() { const regular_bucket_name = ['regular-bucket', 'regular-bucket1', 'regular-bucket2']; const regular_bucket_fail = ['regular-bucket-fail', 'regular-bucket-fail1', 'regular-bucket-fail2']; const data_bucket = 'data-bucket'; - let s3_creds = { + const s3_creds = { s3ForcePathStyle: true, signatureVersion: 'v4', computeChecksums: true, @@ -983,8 +983,8 @@ mocha.describe('nsfs account configurations', function() { account_nsfs_only3: { default_resource: nsr1, nsfs_only: true } }; for (const name of Object.keys(names_and_default_resources)) { - let config1 = names_and_default_resources[name]; - let cur_account = await rpc_client.account.create_account({ + const config1 = names_and_default_resources[name]; + const cur_account = await rpc_client.account.create_account({ ...new_account_params, email: `${name}@noobaa.io`, name: name, @@ -999,7 +999,7 @@ mocha.describe('nsfs account configurations', function() { s3_creds.accessKeyId = cur_account.access_keys[0].access_key.unwrap(); s3_creds.secretAccessKey = cur_account.access_keys[0].secret_key.unwrap(); s3_creds.endpoint = coretest.get_http_address(); - let cur_s3_account = new AWS.S3(s3_creds); + const cur_s3_account = new AWS.S3(s3_creds); accounts[name] = cur_s3_account; } }); diff --git a/src/test/unit_tests/test_bucketspace_versioning.js b/src/test/unit_tests/test_bucketspace_versioning.js index a9a9c2efec..a9d54a2e3e 100644 --- a/src/test/unit_tests/test_bucketspace_versioning.js +++ b/src/test/unit_tests/test_bucketspace_versioning.js @@ -43,7 +43,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { let s3_uid5; let s3_uid6; let s3_admin; - let accounts = []; + const accounts = []; const disabled_key = 'disabled_key.txt'; const key1 = 'key1.txt'; const copied_key1 = 'copied_key1.txt'; @@ -131,7 +131,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.after(async () => { fs_utils.folder_delete(tmp_fs_root); - for (let email of accounts) { + for (const email of accounts) { await rpc_client.account.delete_account({ email }); } }); @@ -773,9 +773,9 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - no version id - versioning disabled', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(80000); - let keys = []; + const keys = []; for (let i = 0; i < 50; i++) { - let random_key = (Math.random() + 1).toString(36).substring(7); + const random_key = (Math.random() + 1).toString(36).substring(7); keys.push(random_key); await upload_object_versions(account_with_access, delete_multi_object_test_bucket, random_key, ['null']); } @@ -784,13 +784,13 @@ 
mocha.describe('bucketspace namespace_fs - versioning', function() { Bucket: delete_multi_object_test_bucket, Delete: { Objects: to_delete_arr } }).promise(); assert.equal(delete_res.Deleted.length, 50); assert.deepStrictEqual(delete_res.Deleted, to_delete_arr); - for (let res of delete_res.Deleted) { + for (const res of delete_res.Deleted) { assert.equal(res.DeleteMarker, undefined); assert.equal(res.VersionId, undefined); } const versions_dir = path.join(full_multi_delete_path, '.versions'); await fs_utils.file_must_not_exist(versions_dir); - let objects = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, full_multi_delete_path); + const objects = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, full_multi_delete_path); assert.equal(objects.length, 1); assert.ok(objects[0].name.startsWith('.noobaa-nsfs_')); @@ -799,23 +799,23 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - no version id', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - let versions_type_arr = ['null']; + const versions_type_arr = ['null']; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker'); } await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let arr = []; + const arr = []; for (let i = 0; i < 200; i++) { arr.push({ Key: 'a' }); } const delete_res = await account_with_access.deleteObjects({ Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); assert.equal(delete_res.Deleted.length, 200); - for (let res of delete_res.Deleted) { + for (const res of delete_res.Deleted) { assert.equal(res.DeleteMarker, true); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); assert.equal(versions.length, 501); await delete_object_versions(full_multi_delete_path, key1); await delete_object_versions(full_multi_delete_path, 'a'); @@ -824,26 +824,26 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - delete only delete markers', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - let versions_type_arr = []; + const versions_type_arr = []; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 0 ? 
'regular' : 'delete_marker'); } - let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let arr = []; + const put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + const arr = []; for (let i = 0; i < 300; i++) { if (i % 2 === 1) arr.push({ Key: key1, VersionId: put_res[i].VersionId }); } const delete_res = await account_with_access.deleteObjects({ Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); assert.equal(delete_res.Deleted.length, 150); - for (let res of delete_res.Deleted) { + for (const res of delete_res.Deleted) { assert.equal(res.DeleteMarker, true); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); assert.equal(versions.length, 149); await fs_utils.file_must_exist(path.join(full_multi_delete_path, key1)); - let latest_stat = await stat_and_get_all(full_multi_delete_path, key1); + const latest_stat = await stat_and_get_all(full_multi_delete_path, key1); assert.equal(latest_stat.xattr[XATTR_VERSION_ID], put_res[298].VersionId); await delete_object_versions(full_multi_delete_path, key1); }); @@ -852,13 +852,13 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); const key2 = 'key2'; - let versions_type_arr = []; + const versions_type_arr = []; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker'); } - let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let put_res2 = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key2, versions_type_arr); - let arr = []; + const put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + const put_res2 = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key2, versions_type_arr); + const arr = []; for (let i = 0; i < 300; i++) { if (i % 2 === 0) arr.push({ Key: key1, VersionId: put_res[i].VersionId }); if (i % 2 === 1) arr.push({ Key: key2, VersionId: put_res2[i].VersionId }); @@ -866,19 +866,19 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { const delete_res = await account_with_access.deleteObjects({ Bucket: delete_multi_object_test_bucket, Delete: { Objects: arr } }).promise(); assert.equal(delete_res.Deleted.length, 300); - for (let res of delete_res.Deleted.slice(0, 150)) { + for (const res of delete_res.Deleted.slice(0, 150)) { assert.equal(res.DeleteMarker, undefined); } - for (let res of delete_res.Deleted.slice(150)) { + for (const res of delete_res.Deleted.slice(150)) { assert.equal(res.DeleteMarker, true); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); // 150 of key1 and 149 of key2 (latest version of key2 is in the parent dir) assert.equal(versions.length, 299); await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); await fs_utils.file_must_exist(path.join(full_multi_delete_path, key2)); - let latest_dm_version = await 
find_max_version_past(full_multi_delete_path, key1); + const latest_dm_version = await find_max_version_past(full_multi_delete_path, key1); const version_path = path.join(full_multi_delete_path, '.versions', key1 + '_' + latest_dm_version); const version_info = await stat_and_get_all(version_path, ''); assert.equal(version_info.xattr[XATTR_DELETE_MARKER], 'true'); @@ -890,12 +890,12 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - delete regular versions & delete markers - new latest is dm', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - let versions_type_arr = []; + const versions_type_arr = []; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker'); } - let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let arr = []; + const put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + const arr = []; for (let i = 200; i < 300; i++) { arr.push({ Key: key1, VersionId: put_res[i].VersionId }); } @@ -907,10 +907,10 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); assert.equal(versions.length, 200); await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); - let latest_dm_version = await find_max_version_past(full_multi_delete_path, key1); + const latest_dm_version = await find_max_version_past(full_multi_delete_path, key1); const version_path = path.join(full_multi_delete_path, '.versions', key1 + '_' + latest_dm_version); const version_info = await stat_and_get_all(version_path, ''); assert.equal(version_info.xattr[XATTR_VERSION_ID], put_res[199].VersionId); @@ -920,12 +920,12 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - delete regular versions & delete markers - new latest is regular version', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - let versions_type_arr = []; + const versions_type_arr = []; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 0 ? 
'regular' : 'delete_marker'); } - let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let arr = []; + const put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + const arr = []; for (let i = 100; i < 200; i++) { arr.push({ Key: key1, VersionId: put_res[i].VersionId }); } @@ -938,11 +938,11 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); assert.equal(versions.length, 198); await fs_utils.file_must_exist(path.join(full_multi_delete_path, key1)); - let latest_stat = await stat_and_get_all(full_multi_delete_path, key1); + const latest_stat = await stat_and_get_all(full_multi_delete_path, key1); assert.equal(latest_stat.xattr[XATTR_VERSION_ID], put_res[298].VersionId); await delete_object_versions(full_multi_delete_path, key1); }); @@ -950,12 +950,12 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - delete keys & regular versions & delete markers ', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - let versions_type_arr = []; + const versions_type_arr = []; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker'); } - let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let arr = []; + const put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + const arr = []; for (let i = 0; i < 50; i++) { arr.push({ Key: key1 }); } @@ -975,7 +975,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { if (i % 2 === 0) assert.equal(delete_res.Deleted[i].DeleteMarker, undefined); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); assert.equal(versions.length, 250); await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); @@ -986,12 +986,12 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.it('delete multiple objects - delete regular versions & delete markers & latest & keys- ', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - let versions_type_arr = []; + const versions_type_arr = []; for (let i = 0; i < 300; i++) { versions_type_arr.push(i % 2 === 1 ? 
'regular' : 'delete_marker'); } - let put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); - let arr = []; + const put_res = await upload_object_versions(account_with_access, delete_multi_object_test_bucket, key1, versions_type_arr); + const arr = []; for (let i = 200; i < 300; i++) { arr.push({ Key: key1, VersionId: put_res[i].VersionId }); } @@ -1011,7 +1011,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { assert.equal(delete_res.Deleted[i].DeleteMarker, true); } const versions_dir = path.join(full_multi_delete_path, '.versions'); - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); assert.equal(versions.length, 250); await fs_utils.file_must_not_exist(path.join(full_multi_delete_path, key1)); @@ -1028,7 +1028,7 @@ async function delete_object_versions(bucket_path, key) { // delete past versions const versions_dir = path.join(bucket_path, '.versions'); try { - let versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); + const versions = await nb_native().fs.readdir(DEFAULT_FS_CONFIG, versions_dir); for (const entry of versions) { if (entry.name.startsWith(key)) { @@ -1043,7 +1043,7 @@ async function delete_object_versions(bucket_path, key) { } async function upload_object_versions(s3_client, bucket, key, object_types_arr) { - let res = []; + const res = []; const versioning_status = await s3_client.getBucketVersioning({ Bucket: bucket }).promise(); for (const obj_type of object_types_arr) { if (obj_type === 'regular' || obj_type === 'null') { @@ -1213,7 +1213,7 @@ async function generate_nsfs_account(options = {}) { nsfs_only: nsfs_only || false }; - let account = await rpc_client.account.create_account({ + const account = await rpc_client.account.create_account({ has_login: false, s3_access: true, email: `${random_name}@noobaa.com`, diff --git a/src/test/unit_tests/test_chunk_coder.js b/src/test/unit_tests/test_chunk_coder.js index 0a5a6274ca..3162b5d8b9 100644 --- a/src/test/unit_tests/test_chunk_coder.js +++ b/src/test/unit_tests/test_chunk_coder.js @@ -306,7 +306,7 @@ async function test_stream({ erase, decode, generator, input_size, chunk_split_c } }); - let transforms = [input, + const transforms = [input, splitter, coder, ]; @@ -334,7 +334,7 @@ function call_chunk_coder_must_succeed(coder, chunk) { } function call_chunk_coder_must_fail(coder, chunk) { - var err; + let err; try { nb_native().chunk_coder(coder, chunk); } catch (err1) { @@ -350,7 +350,7 @@ function call_chunk_coder_must_fail(coder, chunk) { function throw_chunk_err(err) { if (!err.chunks) throw err; - var message = ''; + let message = ''; for (const chunk of err.chunks) { message += 'CHUNK ERRORS: ' + chunk.errors.join(',') + '\n'; } diff --git a/src/test/unit_tests/test_debug_module.js b/src/test/unit_tests/test_debug_module.js index 80f09ee10b..6dc32b65d4 100644 --- a/src/test/unit_tests/test_debug_module.js +++ b/src/test/unit_tests/test_debug_module.js @@ -40,52 +40,52 @@ mocha.describe('debug_module', function() { // shouldn't the module trim the base path ?? mocha.it('should parse __filename', function() { //CI integration workaround - var filename = __filename.indexOf('noobaa-util') >= 0 ? + const filename = __filename.indexOf('noobaa-util') >= 0 ? 
__filename : '/Users/someuser/github/noobaa-core/src/util/test_debug_module.js'; - var dbg = new DebugModule(filename); + const dbg = new DebugModule(filename); assert.strictEqual(dbg._name, 'core.util.test_debug_module'); }); mocha.it('should parse heroku path names', function() { - var dbg = new DebugModule('/app/src/blabla'); + const dbg = new DebugModule('/app/src/blabla'); assert.strictEqual(dbg._name, 'core.blabla'); }); mocha.it('should parse file names with extension', function() { - var dbg = new DebugModule('/app/src/blabla.asd'); + const dbg = new DebugModule('/app/src/blabla.asd'); assert.strictEqual(dbg._name, 'core.blabla'); }); mocha.it('should parse file names with folder with extention', function() { - var dbg = new DebugModule('/app/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/app/src/blabla.asd/lll.asd'); assert.strictEqual(dbg._name, 'core.blabla.asd.lll'); }); mocha.it('should parse file names with stems', function() { - var dbg = new DebugModule('/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/noobaa-core/src/blabla.asd/lll.asd'); assert.strictEqual(dbg._name, 'core.blabla.asd.lll'); }); mocha.it('should parse file names with stems and prefix', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); assert.strictEqual(dbg._name, 'core.blabla.asd.lll'); }); mocha.it('should parse windows style paths', function() { - var dbg = new DebugModule('C:\\Program Files\\NooBaa\\src\\agent\\agent_cli.js'); + const dbg = new DebugModule('C:\\Program Files\\NooBaa\\src\\agent\\agent_cli.js'); assert.strictEqual(dbg._name, 'core.agent.agent_cli'); }); mocha.it('should set level for windows style module and propogate', function() { - var dbg = new DebugModule('C:\\Program Files\\NooBaa\\src\\agent\\agent_cli.js'); + const dbg = new DebugModule('C:\\Program Files\\NooBaa\\src\\agent\\agent_cli.js'); dbg.set_module_level(3, 'C:\\Program Files\\NooBaa\\src\\agent'); assert.strictEqual(dbg._cur_level.__level, 3); }); mocha.it('should log when level is appropriate', function() { - var rotation_command = ''; + let rotation_command = ''; //no special handling on Darwin for now. 
ls as place holder if (os_utils.IS_MAC) { rotation_command = 'ls'; @@ -93,22 +93,22 @@ mocha.describe('debug_module', function() { rotation_command = '/usr/sbin/logrotate /etc/logrotate.d/noobaa'; } return os_utils.exec(rotation_command).then(function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); dbg.log0("test_debug_module: log0 should appear in the log"); return file_content_verify("text", "test_debug_module: log0 should appear in the log"); }); }); mocha.it('should NOT log when level is lower', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); dbg.log2("test_debug_module: log2 should not appear in the log"); return file_content_verify("no_text", "test_debug_module: log2 should not appear in the log"); }); mocha.it('should log after changing level of module', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); dbg.set_module_level(4); - var a = { + const a = { out: "out", second: { inner: "inner" @@ -120,13 +120,13 @@ mocha.describe('debug_module', function() { }); mocha.it('should log backtrace if asked to', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); dbg.log0_withbt("test_debug_module: log0 should appear with backtrace"); return file_content_verify("text", "core.blabla.asd.lll:: test_debug_module: log0 should appear with backtrace at"); }); mocha.it('setting a higher module should affect sub module', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); dbg.set_module_level(2, 'core'); dbg.log2("test_debug_module: log2 setting a higher level module level should affect current"); dbg.set_module_level(0, 'core'); @@ -134,21 +134,21 @@ mocha.describe('debug_module', function() { }); mocha.it('formatted string should be logged correctly (string substitutions)', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); - var s1 = 'this'; - var s2 = 'should'; - var s3 = 'expected'; - var d1 = 3; - var d2 = 2; + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const s1 = 'this'; + const s2 = 'should'; + const s3 = 'expected'; + const d1 = 3; + const d2 = 2; dbg.log0("%s string substitutions (%d) %s be logged as %s, with two (%d) numbers", s1, d1, s2, s3, d2); return file_content_verify("text", " this string substitutions (3) should be logged as expected, with two (2) numbers"); }); mocha.it('console various logs should be logged as well', function() { - var syslog_levels = ["trace", "log", "info", "error"]; + const syslog_levels = ["trace", "log", "info", "error"]; return _.reduce(syslog_levels, function(promise, l) { return promise.then(function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); _.noop(dbg); // lint unused bypass console[l]("console - %s - should be captured", l); return file_content_verify("text", "CONSOLE:: console - " + l + " - should be captured"); @@ -157,7 +157,7 @@ mocha.describe('debug_module', function() { 
}); mocha.it('fake browser verify logging and console wrapping', function() { - var dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); + const dbg = new DebugModule('/web/noise/noobaa-core/src/blabla.asd/lll.asd'); dbg.log0("test_debug_module: browser should appear in the log"); return file_content_verify("text", "core.blabla.asd.lll:: test_debug_module: browser should appear in the log"); }); diff --git a/src/test/unit_tests/test_encryption.js b/src/test/unit_tests/test_encryption.js index d06bd23721..e469cc09e7 100644 --- a/src/test/unit_tests/test_encryption.js +++ b/src/test/unit_tests/test_encryption.js @@ -26,10 +26,10 @@ const BKT = `bucket.example`; mocha.describe('Encryption tests', function() { const { rpc_client, EMAIL, SYSTEM } = coretest; let response_account; - let accounts = []; - let buckets = []; - let namespace_buckets = []; - let namespace_resources = []; + const accounts = []; + const buckets = []; + const namespace_buckets = []; + const namespace_resources = []; mocha.describe('Check master keys in system', async function() { mocha.it('load system store', async function() { @@ -123,7 +123,7 @@ mocha.describe('Encryption tests', function() { mocha.it('create accounts and compare acount access keys succefully', async function() { this.timeout(600000); // eslint-disable-line no-invalid-this const db_system = await db_client.collection('systems').findOne({ name: SYSTEM }); - let new_account_params = { + const new_account_params = { has_login: false, s3_access: true, }; @@ -618,7 +618,7 @@ mocha.describe('Rotation tests', function() { }); mocha.it('create account after disable system master key test', async function() { this.timeout(600000); // eslint-disable-line no-invalid-this - let new_account_params = { + const new_account_params = { has_login: false, s3_access: true, email: 'account-after-disable-ststem', @@ -734,9 +734,9 @@ mocha.describe('Rotation tests', function() { this.timeout(600000); // eslint-disable-line no-invalid-this await system_store.load(); // collect old data - let old_accounts = []; - let old_buckets = []; - let old_pools = []; + const old_accounts = []; + const old_buckets = []; + const old_pools = []; await P.all(_.map(system_store.data.accounts, async account => { if (!account.access_keys && account.email.unwrap() === "support@noobaa.com") { return; @@ -1013,10 +1013,10 @@ async function namespace_cache_tests(rpc_client, namespace_resources, sys_name, } async function populate_system(rpc_client) { - let accounts = []; - let buckets = []; + const accounts = []; + const buckets = []; - let new_account_params = { + const new_account_params = { has_login: false, s3_access: true, }; @@ -1077,7 +1077,7 @@ async function populate_system(rpc_client) { } async function create_delete_external_connections(rpc_client) { - let new_account_params = { + const new_account_params = { has_login: false, s3_access: true, }; diff --git a/src/test/unit_tests/test_fs_utils.js b/src/test/unit_tests/test_fs_utils.js index eac31043b3..09eb3542dc 100644 --- a/src/test/unit_tests/test_fs_utils.js +++ b/src/test/unit_tests/test_fs_utils.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -// var _ = require('lodash'); -var mocha = require('mocha'); -var assert = require('assert'); +// const _ = require('lodash'); +const mocha = require('mocha'); +const assert = require('assert'); -var fs_utils = require('../../util/fs_utils'); +const fs_utils = require('../../util/fs_utils'); function log(...args) { if (process.env.SUPPRESS_LOGS) return; 
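Note for reviewers: the hunks above and below all apply the same two lint rules, so a minimal standalone sketch may help when checking them (illustrative only, not code from this patch; the names are made up): `const` forbids rebinding but not mutation, so an array that is only ever push()ed into becomes `const`, a counter that is reassigned must stay `let`, and `for...of` variables become `const` because each iteration creates a fresh binding.

    'use strict';
    const versions_type_arr = [];            // mutated via push(), never rebound: const is valid
    for (let i = 0; i < 6; i += 1) {         // i is reassigned each iteration: must remain let
        versions_type_arr.push(i % 2 === 0 ? 'regular' : 'delete_marker');
    }
    for (const type of versions_type_arr) {  // fresh binding per iteration: const is valid
        if (type === 'delete_marker') console.log('found a delete marker');
    }

This is exactly the shape of the repeated `let versions_type_arr = []` → `const versions_type_arr = []` and `for (let res of ...)` → `for (const res of ...)` changes in the versioning tests above.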
diff --git a/src/test/unit_tests/test_keys_lock.js b/src/test/unit_tests/test_keys_lock.js index f26eda9dfb..69c4d89d3c 100644 --- a/src/test/unit_tests/test_keys_lock.js +++ b/src/test/unit_tests/test_keys_lock.js @@ -1,22 +1,22 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -// var _ = require('lodash'); -var P = require('../../util/promise'); -var mocha = require('mocha'); -var assert = require('assert'); -var KeysLock = require('../../util/keys_lock'); +// const _ = require('lodash'); +const P = require('../../util/promise'); +const mocha = require('mocha'); +const assert = require('assert'); +const KeysLock = require('../../util/keys_lock'); mocha.describe('keys_lock', function() { mocha.it('should create ok', function() { - var kl = new KeysLock(); + const kl = new KeysLock(); assert.strictEqual(kl.length, 0); }); mocha.it('should lock key', function() { - var kl; - var first_woke = false; + let kl; + let first_woke = false; function do_wake() { return P.resolve() @@ -66,8 +66,8 @@ mocha.describe('keys_lock', function() { mocha.it('should work parallel keys', function() { - var kl; - var first_woke = false; + let kl; + let first_woke = false; function do_wake_first() { return P.resolve() diff --git a/src/test/unit_tests/test_lifecycle.js b/src/test/unit_tests/test_lifecycle.js index c053b3e590..017f0b4267 100644 --- a/src/test/unit_tests/test_lifecycle.js +++ b/src/test/unit_tests/test_lifecycle.js @@ -98,7 +98,7 @@ mocha.describe('lifecycle', () => { console.log('completeUploadResult', completeUploadResult); // go back in time - let create_time = new Date(); + const create_time = new Date(); create_time.setDate(create_time.getDate() - age); const update = { create_time, diff --git a/src/test/unit_tests/test_linked_list.js b/src/test/unit_tests/test_linked_list.js index 5fa9b94cb5..851d23ce3c 100644 --- a/src/test/unit_tests/test_linked_list.js +++ b/src/test/unit_tests/test_linked_list.js @@ -1,23 +1,23 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var mocha = require('mocha'); -var assert = require('assert'); -var LinkedList = require('../../util/linked_list'); +const _ = require('lodash'); +const mocha = require('mocha'); +const assert = require('assert'); +const LinkedList = require('../../util/linked_list'); mocha.describe('linked_list', function() { mocha.it('should create ok', function() { - var ll = new LinkedList(); + const ll = new LinkedList(); _.noop(ll); // lint unused bypass }); mocha.it('should handle single item', function() { - var ll = new LinkedList(); + const ll = new LinkedList(); assert(ll.is_empty()); assert.strictEqual(ll.length, 0); - var item = { + const item = { foo: 'bar' }; ll.push_front(item); @@ -25,7 +25,7 @@ mocha.describe('linked_list', function() { assert.strictEqual(ll.get_back(), item); assert(!ll.is_empty()); assert.strictEqual(ll.length, 1); - var pop_item = ll.pop_back(); + const pop_item = ll.pop_back(); assert.strictEqual(pop_item, item); assert.strictEqual(ll.length, 0); assert(ll.is_empty()); @@ -38,9 +38,9 @@ mocha.describe('linked_list', function() { }); mocha.it('should throw mixing item between lists', function() { - var l1 = new LinkedList(); - var l2 = new LinkedList(); - var item = {}; + const l1 = new LinkedList(); + const l2 = new LinkedList(); + const item = {}; l1.push_front(item); assert.throws(function() { l2.push_front(item); diff --git a/src/test/unit_tests/test_lru.js b/src/test/unit_tests/test_lru.js index 8380aa3ec7..77d6e3341c 100644 --- a/src/test/unit_tests/test_lru.js +++ 
b/src/test/unit_tests/test_lru.js @@ -87,7 +87,7 @@ mocha.describe('lru', function() { }); lru._sanity(); - let item = lru.find_or_add_item(1); + const item = lru.find_or_add_item(1); assert(item); assert.strictEqual(lru.usage, 0); assert.strictEqual(item.usage, 1); @@ -98,7 +98,7 @@ mocha.describe('lru', function() { assert.strictEqual(lru.usage, 0); lru._sanity(); - let item1 = lru.find_or_add_item(1); + const item1 = lru.find_or_add_item(1); assert(item1 !== item); lru._sanity(); }); @@ -111,8 +111,8 @@ mocha.describe('lru', function() { lru._sanity(); for (let i = 0; i < 1000; ++i) { - let key = Math.floor(100 * Math.random()); - let item = lru.find_or_add_item(key); + const key = Math.floor(100 * Math.random()); + const item = lru.find_or_add_item(key); lru.set_usage(item, Math.floor(MAX_USAGE * Math.random())); lru._sanity(); } diff --git a/src/test/unit_tests/test_mapper.js b/src/test/unit_tests/test_mapper.js index c030761f9b..9581a41468 100644 --- a/src/test/unit_tests/test_mapper.js +++ b/src/test/unit_tests/test_mapper.js @@ -633,7 +633,7 @@ coretest.describe_mapper_test_case({ } function make_block(params = {}, i = 0) { - let { + const { readable, writable, is_cloud_node, diff --git a/src/test/unit_tests/test_mdsequence.js b/src/test/unit_tests/test_mdsequence.js index 1f4910b8ea..0f72ddd2a6 100644 --- a/src/test/unit_tests/test_mdsequence.js +++ b/src/test/unit_tests/test_mdsequence.js @@ -7,7 +7,7 @@ const assert = require('assert'); const { MongoSequence } = require('../../util/mongo_client'); async function get_postgres_client(params) { - let pgc = new PostgresClient(params); + const pgc = new PostgresClient(params); await pgc.connect(); return pgc; } diff --git a/src/test/unit_tests/test_namespace_cache.js b/src/test/unit_tests/test_namespace_cache.js index f7b349279b..9494a20faf 100644 --- a/src/test/unit_tests/test_namespace_cache.js +++ b/src/test/unit_tests/test_namespace_cache.js @@ -598,13 +598,13 @@ mocha.describe('namespace caching: read scenarios and fresh objects', () => { const size = block_size * 5; const obj = random_object(size); hub.add_obj(obj); - let params = { + const params = { bucket: obj.bucket, key: obj.key, md_conditions: { if_match_etag: 'match etag' }, }; - let object_md = await ns_cache.read_object_md(params, object_sdk); + const object_md = await ns_cache.read_object_md(params, object_sdk); params.object_md = object_md; await ns_cache.read_object_stream(params, object_sdk); @@ -987,17 +987,17 @@ mocha.describe('namespace caching: range read scenarios', () => { const size = block_size * 5; const obj = random_object(size); hub.add_obj(obj); - let start = block_size + 100; + const start = block_size + 100; // end is exclusive - let end = start + 100; - let params = { + const end = start + 100; + const params = { bucket: obj.bucket, key: obj.key, start, end, }; - let object_md = await ns_cache.read_object_md(params, object_sdk); + const object_md = await ns_cache.read_object_md(params, object_sdk); params.object_md = object_md; try { await ns_cache.read_object_stream(params, object_sdk); @@ -1027,10 +1027,10 @@ mocha.describe('namespace caching: range read scenarios', () => { const size = block_size * 5; const obj = random_object(size); hub.add_obj(obj); - let start = block_size + 100; + const start = block_size + 100; // end is exclusive - let end = start + 100; - let params = { + const end = start + 100; + const params = { bucket: obj.bucket, key: obj.key, start, @@ -1038,7 +1038,7 @@ mocha.describe('namespace caching: range read scenarios', () => 
{ md_conditions: { if_match_etag: obj.etag }, }; - let object_md = await ns_cache.read_object_md(params, object_sdk); + const object_md = await ns_cache.read_object_md(params, object_sdk); params.object_md = object_md; await ns_cache.read_object_stream(params, object_sdk); @@ -1061,10 +1061,10 @@ mocha.describe('namespace caching: range read scenarios', () => { const size = block_size * 5; const obj = random_object(size); hub.add_obj(obj); - let start = block_size + 100; + const start = block_size + 100; // end is exclusive - let end = start + 100; - let params = { + const end = start + 100; + const params = { bucket: obj.bucket, key: obj.key, start, @@ -1072,7 +1072,7 @@ mocha.describe('namespace caching: range read scenarios', () => { md_conditions: { if_match_etag: 'non match etag' }, }; - let object_md = await ns_cache.read_object_md(params, object_sdk); + const object_md = await ns_cache.read_object_md(params, object_sdk); params.object_md = object_md; await ns_cache.read_object_stream(params, object_sdk); @@ -1094,10 +1094,10 @@ mocha.describe('namespace caching: range read scenarios', () => { const size = block_size * 5; const obj = random_object(size); hub.add_obj(obj); - let start = block_size + 100; + const start = block_size + 100; // end is exclusive - let end = start + 100; - let params = { + const end = start + 100; + const params = { bucket: obj.bucket, key: obj.key, start, @@ -1105,7 +1105,7 @@ mocha.describe('namespace caching: range read scenarios', () => { md_conditions: { if_unmodified_since: obj.last_modified }, }; - let object_md = await ns_cache.read_object_md(params, object_sdk); + const object_md = await ns_cache.read_object_md(params, object_sdk); params.object_md = object_md; await ns_cache.read_object_stream(params, object_sdk); diff --git a/src/test/unit_tests/test_namespace_fs.js b/src/test/unit_tests/test_namespace_fs.js index c6e63b32a8..c8b530d13d 100644 --- a/src/test/unit_tests/test_namespace_fs.js +++ b/src/test/unit_tests/test_namespace_fs.js @@ -264,7 +264,7 @@ mocha.describe('namespace_fs', function() { const dir2_version_dir = dir2 + `${version_dir}`; let s3_client; let s3_admin; - let accounts = []; + const accounts = []; const key = 'key'; const body = 'AAAA'; const version_key = 'version_key'; @@ -339,7 +339,7 @@ mocha.describe('namespace_fs', function() { mocha.after(async () => { fs_utils.folder_delete(tmp_fs_root); - for (let email of accounts) { + for (const email of accounts) { await rpc_client.account.delete_account({ email }); } }); @@ -375,7 +375,7 @@ mocha.describe('namespace_fs', function() { const versionID_3 = 'mtime-1i357k9-ino-13l57j9'; let s3_client; let s3_admin; - let accounts = []; + const accounts = []; const dis_version_key = 'dis_version'; const dis_version_body = 'AAAAA'; const en_version_key = 'en_version'; @@ -464,7 +464,7 @@ mocha.describe('namespace_fs', function() { mocha.after(async () => { if (file_pointer) await file_pointer.close(DEFAULT_FS_CONFIG, get_obj_path); fs_utils.folder_delete(tmp_fs_root); - for (let email of accounts) { + for (const email of accounts) { await rpc_client.account.delete_account({ email }); } }); @@ -1036,7 +1036,7 @@ mocha.describe('namespace_fs copy object', function() { mocha.describe('upload_object (copy)', function() { const upload_key = 'upload_key_1'; - let copy_xattr = {}; + const copy_xattr = {}; const copy_key_1 = 'copy_key_1'; const data = crypto.randomBytes(100); diff --git a/src/test/unit_tests/test_node_allocator.js b/src/test/unit_tests/test_node_allocator.js index 
e206aa3e01..49154c5e05 100644 --- a/src/test/unit_tests/test_node_allocator.js +++ b/src/test/unit_tests/test_node_allocator.js @@ -50,8 +50,8 @@ mocha.describe('node_allocator', function() { }) ])) .then(res => { - let allocation = res[0]; - let pool_nodes = res[1]; + const allocation = res[0]; + const pool_nodes = res[1]; assert(allocation.latency_groups.length === config.NODE_ALLOCATOR_NUM_CLUSTERS, 'KMEANS did not divide to correct K number of groups'); assert(_.every(allocation.latency_groups, group => group.nodes.length), 'KMEANS groups should have nodes'); const total_nodes_list = _.reduce(allocation.latency_groups, (sum, group) => sum + group.nodes.length, 0); diff --git a/src/test/unit_tests/test_ns_list_objects.js b/src/test/unit_tests/test_ns_list_objects.js index fbeda33df0..9cdb3fa114 100644 --- a/src/test/unit_tests/test_ns_list_objects.js +++ b/src/test/unit_tests/test_ns_list_objects.js @@ -386,7 +386,7 @@ function test_ns_list_objects(ns, object_sdk, bucket) { async function truncated_listing(params, use_upload_id_marker, upload_mode) { // Initialization of IsTruncated in order to perform the first while cycle - var res = { + const res = { is_truncated: true, objects: [], common_prefixes: [], diff --git a/src/test/unit_tests/test_object_io.js b/src/test/unit_tests/test_object_io.js index 72683a4bc1..435e6ed8dc 100644 --- a/src/test/unit_tests/test_object_io.js +++ b/src/test/unit_tests/test_object_io.js @@ -477,8 +477,8 @@ coretest.describe_mapper_test_case({ } function intersection(start1, end1, start2, end2) { - var start = start1 > start2 ? start1 : start2; - var end = end1 < end2 ? end1 : end2; + const start = start1 > start2 ? start1 : start2; + const end = end1 < end2 ? end1 : end2; return (end <= start) ? null : { start: start, end: end, @@ -500,7 +500,7 @@ coretest.describe_mapper_test_case({ const generated_missing_parts = []; cached_parts.sort((a, b) => a.start - b.end); for (const part of cached_parts) { - let part_range = intersection(part.start, part.end, pos, end); + const part_range = intersection(part.start, part.end, pos, end); console.log('create_missing_parts:', { part: _.omit(part, 'data'), part_range, pos }); if (!part_range) { if (end <= part.start) { @@ -519,8 +519,8 @@ coretest.describe_mapper_test_case({ generated_missing_parts.push({ start: pos, end: part_range.start, data }); } - let buffer_start = part_range.start - part.start; - let buffer_end = part_range.end - part.start; + const buffer_start = part_range.start - part.start; + const buffer_end = part_range.end - part.start; pos = part_range.end; buffers.push(part.data.slice(buffer_start, buffer_end)); diff --git a/src/test/unit_tests/test_postgres_client.js b/src/test/unit_tests/test_postgres_client.js index f92427aa06..1f53b2357d 100644 --- a/src/test/unit_tests/test_postgres_client.js +++ b/src/test/unit_tests/test_postgres_client.js @@ -62,7 +62,7 @@ const additional_properties = { a: 1, b: '2', c: 3.14 }; async function get_postgres_client(params) { - let pgc = new PostgresClient(params); + const pgc = new PostgresClient(params); await pgc.connect(); console.log('deleting old database', params.database); await pgc.dropDatabase(); @@ -181,7 +181,7 @@ mocha.describe('postgres_client', function() { mocha.it('should insert a single doc on multiple parallel upserts', async function() { - let upsert_table = postgres_client.define_collection({ + const upsert_table = postgres_client.define_collection({ name: `upsert_${test_table_name}`, schema: test_schema, // db_indexes: test_indexes, @@ -195,7 
+195,7 @@ mocha.describe('postgres_client', function() { const num_upserts = 40; await P.map(_.times(num_upserts), async i => upsert_table.findOneAndUpdate(query, update, options)); // check that there is only one doc and the int_field is as num_upserts - let find_res = await upsert_table.find({}); + const find_res = await upsert_table.find({}); assert.strictEqual(find_res.length, 1, 'number of inserted documents must be 1'); }); diff --git a/src/test/unit_tests/test_prefetch.js b/src/test/unit_tests/test_prefetch.js index 1c508e577d..13c4895c0f 100644 --- a/src/test/unit_tests/test_prefetch.js +++ b/src/test/unit_tests/test_prefetch.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var P = require('../../util/promise'); -var mocha = require('mocha'); +const _ = require('lodash'); +const P = require('../../util/promise'); +const mocha = require('mocha'); // var assert = require('assert'); -var Prefetch = require('../../util/prefetch'); +const Prefetch = require('../../util/prefetch'); function log(...args) { if (process.env.SUPPRESS_LOGS) return; @@ -21,7 +21,7 @@ mocha.describe('prefetch', function() { low_length: 30, high_length: 32, load: async count => { - var n = count; + const n = count; log('... LOAD', n, '(' + count + ')', 'length', pr.length); await P.delay(5); log('>>> LOAD', n, '(' + count + ')', 'length', pr.length); diff --git a/src/test/unit_tests/test_range_stream.js b/src/test/unit_tests/test_range_stream.js index ec4fadf5d4..d1021c5642 100644 --- a/src/test/unit_tests/test_range_stream.js +++ b/src/test/unit_tests/test_range_stream.js @@ -96,8 +96,8 @@ mocha.describe('range_stream', function() { function _range_stream(start, end, in_bufs) { return new Promise((resolve, reject) => { - let bufs = []; - let stm = new RangeStream(start, end); + const bufs = []; + const stm = new RangeStream(start, end); stm.on('data', data => bufs.push(data)); stm.on('error', reject); stm.on('end', () => resolve(bufs)); diff --git a/src/test/unit_tests/test_rpc.js b/src/test/unit_tests/test_rpc.js index 5c3615b63b..7a7983c88c 100644 --- a/src/test/unit_tests/test_rpc.js +++ b/src/test/unit_tests/test_rpc.js @@ -177,7 +177,7 @@ mocha.describe('RPC', function() { }; // test data - var PARAMS = { + const PARAMS = { param1: '1', param2: 2, param3: true, @@ -200,20 +200,20 @@ mocha.describe('RPC', function() { } })), }; - var REPLY = { + const REPLY = { rest: ['IS', { fucking: 'aWeSoMe' }] }; - var ERROR_MESSAGE = 'THIS IS AN EXPECTED TEST ERROR'; - var ERROR_CODE = 'TEST_CODE'; - var schema = new RpcSchema(); + const ERROR_MESSAGE = 'THIS IS AN EXPECTED TEST ERROR'; + const ERROR_CODE = 'TEST_CODE'; + const schema = new RpcSchema(); schema.register_api(test_api); schema.register_api(common_test_api); schema.compile(); - var rpc; - var client; + let rpc; + let client; function make_server() { const server = { @@ -266,7 +266,7 @@ mocha.describe('RPC', function() { mocha.it('should detect api with bad method', function() { assert.throws(function() { - var bad_schema = new RpcSchema(); + const bad_schema = new RpcSchema(); bad_schema.register_api({ $id: 'test_bad_api', methods: { @@ -310,7 +310,7 @@ mocha.describe('RPC', function() { }); mocha.it('should work on mock server', function() { - var server = { + const server = { get: function() { /* Empty Func */ }, put: function() { /* Empty Func */ }, post: function() { /* Empty Func */ }, @@ -449,7 +449,7 @@ mocha.describe('RPC', function() { return rpc.register_tcp_transport(0) .then(tcp_server_arg => { 
tcp_server = tcp_server_arg; - var tcp_client = rpc.new_client({ + const tcp_client = rpc.new_client({ address: 'tcp://localhost:' + tcp_server.port }); return tcp_client.test.get(_.cloneDeep(PARAMS)); @@ -466,7 +466,7 @@ mocha.describe('RPC', function() { .then(() => rpc.register_tcp_transport(0, ssl_utils.generate_ssl_certificate())) .then(tls_server_arg => { tls_server = tls_server_arg; - var tls_client = rpc.new_client({ + const tls_client = rpc.new_client({ address: 'tls://localhost:' + tls_server.port }); return tls_client.test.get(_.cloneDeep(PARAMS)); @@ -508,13 +508,13 @@ mocha.describe('RPC', function() { rpc.register_service(test_api, make_server()); let tcp_server; const ADDR = 'n2n://testrpc'; - let n2n_agent = rpc.register_n2n_agent(params => rpc.accept_n2n_signal(params)); + const n2n_agent = rpc.register_n2n_agent(params => rpc.accept_n2n_signal(params)); n2n_agent.set_rpc_address(ADDR); n2n_agent.update_n2n_config(n2n_config); return rpc.register_tcp_transport(0) .then(tcp_server_arg => { tcp_server = tcp_server_arg; - var n2n_client = rpc.new_client({ + const n2n_client = rpc.new_client({ address: ADDR }); return n2n_client.test.get(_.cloneDeep(PARAMS)); diff --git a/src/test/unit_tests/test_s3_bucket_policy.js b/src/test/unit_tests/test_s3_bucket_policy.js index 636ebf218b..a34034009b 100644 --- a/src/test/unit_tests/test_s3_bucket_policy.js +++ b/src/test/unit_tests/test_s3_bucket_policy.js @@ -365,7 +365,6 @@ mocha.describe('s3_bucket_policy', function() { mocha.it('should be able to put versionning when bucket policy permits', async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(15000); - let version_id; const policy = { Version: '2012-10-17', Statement: [{ @@ -399,7 +398,7 @@ mocha.describe('s3_bucket_policy', function() { Status: 'Enabled' } }).promise(); - version_id = (await s3_a.putObject({ + const version_id = (await s3_a.putObject({ Body: 'Some data for the file... bla bla bla... 
version II', Bucket: BKT, Key: KEY diff --git a/src/test/unit_tests/test_s3_list_objects.js b/src/test/unit_tests/test_s3_list_objects.js index 85faec34a3..31700c34e4 100644 --- a/src/test/unit_tests/test_s3_list_objects.js +++ b/src/test/unit_tests/test_s3_list_objects.js @@ -14,7 +14,7 @@ const P = require('../../util/promise'); const ObjectIO = require('../../sdk/object_io'); const { rpc_client } = coretest; -let object_io = new ObjectIO(); +const object_io = new ObjectIO(); object_io.set_verification_mode(); const assert = require('assert'); @@ -22,18 +22,18 @@ const BKT = 'first.bucket'; // the default bucket name mocha.describe('s3_list_objects', function() { - let files_without_folders_to_upload = []; - let folders_to_upload = []; - let files_in_folders_to_upload = []; - let files_in_utf_diff_delimiter = []; - let max_keys_objects = []; - let files_in_multipart_folders_to_upload = []; - let same_multipart_file1 = []; - let same_multipart_file2 = []; - let small_folder_with_multipart = []; - let prefix_infinite_loop_test = []; - - var i = 0; + const files_without_folders_to_upload = []; + const folders_to_upload = []; + const files_in_folders_to_upload = []; + const files_in_utf_diff_delimiter = []; + const max_keys_objects = []; + const files_in_multipart_folders_to_upload = []; + const same_multipart_file1 = []; + const same_multipart_file2 = []; + const small_folder_with_multipart = []; + const prefix_infinite_loop_test = []; + + let i = 0; for (i = 0; i < 264; i++) { folders_to_upload.push(`folder${i}/`); } @@ -70,8 +70,8 @@ mocha.describe('s3_list_objects', function() { mocha.it('issue use case', function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(10 * 60 * 1000); - let issue_files_folders_to_upload = ['20220323/99/test.txt', '20220323/990/test.txt']; - let expected_files_uploaded = ['20220323/99/', '20220323/990/']; + const issue_files_folders_to_upload = ['20220323/99/test.txt', '20220323/990/test.txt']; + const expected_files_uploaded = ['20220323/99/', '20220323/990/']; return run_case(issue_files_folders_to_upload, async function(server_upload_response) { @@ -359,12 +359,12 @@ mocha.describe('s3_list_objects', function() { same_multipart_file2), async function(server_upload_response) { // Uploading zero size objects from the key arrays that were provided - let list_reply = await rpc_client.object.list_uploads({ + const list_reply = await rpc_client.object.list_uploads({ bucket: BKT, delimiter: '/', limit: 25 }); - let objects = _.map(list_reply.objects, obj => obj.key); + const objects = _.map(list_reply.objects, obj => obj.key); assert.strictEqual(_.difference(['multipart2/'], list_reply.common_prefixes).length, 0, 'prefixes: ' + list_reply.common_prefixes); assert.strictEqual(_.difference(_.concat(same_multipart_file1, same_multipart_file2), @@ -384,13 +384,13 @@ mocha.describe('s3_list_objects', function() { prefix_infinite_loop_test, async function(server_upload_response) { // Uploading zero size objects from the key arrays that were provided - let list_reply = await truncated_listing({ + const list_reply = await truncated_listing({ bucket: BKT, prefix: 'd/', delimiter: '/', limit: 1, }, /* use_upload_id_marker = */ false, /* upload_mode = */ false); - let objects = _.map(list_reply.objects, obj => obj.key); + const objects = _.map(list_reply.objects, obj => obj.key); assert.strictEqual(_.difference(['d/d/'], list_reply.common_prefixes).length, 0, 'prefixes: ' + list_reply.common_prefixes); assert.strictEqual(_.difference(['d/f'], 
objects).length, 0, 'objects:' + objects); assert.strictEqual((list_reply.common_prefixes.length + objects.length), 2); @@ -471,14 +471,14 @@ async function run_case(array_of_names, case_func, only_initiate) { async function truncated_listing(params, use_upload_id_marker, upload_mode) { // Initialization of IsTruncated in order to perform the first while cycle - var listObjectsResponse = { + const listObjectsResponse = { is_truncated: true, objects: [], common_prefixes: [], key_marker: '' }; - var query_obj = { + const query_obj = { key_marker: listObjectsResponse.key_marker }; @@ -495,7 +495,7 @@ async function truncated_listing(params, use_upload_id_marker, upload_mode) { await rpc_client.object.list_objects(func_params); listObjectsResponse.is_truncated = res.is_truncated; - let res_list = { + const res_list = { objects: res.objects, common_prefixes: res.common_prefixes }; diff --git a/src/test/unit_tests/test_s3_ops.js b/src/test/unit_tests/test_s3_ops.js index 546d5c5cca..183a4e7266 100644 --- a/src/test/unit_tests/test_s3_ops.js +++ b/src/test/unit_tests/test_s3_ops.js @@ -18,7 +18,7 @@ const P = require('../../util/promise'); // If any of these variables are not defined, // use the noobaa endpoint to create buckets // for namespace cache bucket testing. -let USE_REMOTE_ENDPOINT = process.env.USE_REMOTE_ENDPOINT === 'true'; +const USE_REMOTE_ENDPOINT = process.env.USE_REMOTE_ENDPOINT === 'true'; const { rpc_client, EMAIL } = coretest; const BKT1 = 'test-s3-ops-bucket-ops'; const BKT2 = 'test-s3-ops-object-ops'; @@ -213,7 +213,7 @@ mocha.describe('s3_ops', function() { } }; let httpStatus; - var notSupported = false; + let notSupported = false; try { await s3.putObjectTagging(params).on('complete', function(response) { httpStatus = response.httpResponse.statusCode; @@ -271,7 +271,7 @@ mocha.describe('s3_ops', function() { } }; let httpStatus; - var notSupported = false; + let notSupported = false; try { await s3.putObjectTagging(params).on('complete', function(response) { @@ -461,7 +461,7 @@ mocha.describe('s3_ops', function() { const res6 = await s3.listMultipartUploads({ Bucket: bucket_name }).promise(); - var UploadId = _.result(_.find(res6.Uploads, function(obj) { + const UploadId = _.result(_.find(res6.Uploads, function(obj) { return obj.UploadId === res1.UploadId; }), 'UploadId'); if (!is_namespace_blob_bucket(bucket_type, remote_endpoint_options && remote_endpoint_options.endpoint_type)) { @@ -515,7 +515,7 @@ mocha.describe('s3_ops', function() { const res6 = await s3.listMultipartUploads({ Bucket: bucket_name }).promise(); - var UploadId = _.result(_.find(res6.Uploads, function(obj) { + const UploadId = _.result(_.find(res6.Uploads, function(obj) { return obj.UploadId === res1.UploadId; }), 'UploadId'); if (!is_namespace_blob_bucket(bucket_type, remote_endpoint_options && remote_endpoint_options.endpoint_type)) { diff --git a/src/test/unit_tests/test_sts.js b/src/test/unit_tests/test_sts.js index 79cad527bf..b939fc5114 100644 --- a/src/test/unit_tests/test_sts.js +++ b/src/test/unit_tests/test_sts.js @@ -158,7 +158,7 @@ mocha.describe('STS tests', function() { mocha.after(async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - for (let email of accounts) { + for (const email of accounts) { await rpc_client.account.delete_account({ email }); } }); @@ -210,16 +210,16 @@ mocha.describe('STS tests', function() { validate_assume_role_response(json, `arn:aws:sts::${user_b_key}:assumed-role/${role_b}/${params.RoleSessionName}`, 
`${user_b_key}:${params.RoleSessionName}`, user_b_key); - let temp_creds = validate_assume_role_response(json, `arn:aws:sts::${user_b_key}:assumed-role/${role_b}/${params.RoleSessionName}`, + const temp_creds = validate_assume_role_response(json, `arn:aws:sts::${user_b_key}:assumed-role/${role_b}/${params.RoleSessionName}`, `${user_b_key}:${params.RoleSessionName}`, user_b_key); - let s3 = new AWS.S3({ + const s3 = new AWS.S3({ ...sts_creds, accessKeyId: temp_creds.access_key, secretAccessKey: temp_creds.secret_key, sessionToken: temp_creds.session_token, endpoint: coretest.get_https_address(), }); - let list_objects_res = await s3.listObjects({ Bucket: 'first.bucket' }).promise(); + const list_objects_res = await s3.listObjects({ Bucket: 'first.bucket' }).promise(); assert.ok(list_objects_res); }); @@ -386,11 +386,11 @@ async function assume_role_and_parse_xml(sts, params) { function validate_assume_role_response(json, expected_arn, expected_role_id, assumed_access_key) { dbg.log0('test.sts.validate_assume_role_response: ', json); assert.ok(json && json.AssumeRoleResponse && json.AssumeRoleResponse.AssumeRoleResult); - let result = json.AssumeRoleResponse.AssumeRoleResult[0]; + const result = json.AssumeRoleResponse.AssumeRoleResult[0]; assert.ok(result); // validate credentials - let credentials = result.Credentials[0]; + const credentials = result.Credentials[0]; assert.ok(credentials && credentials.AccessKeyId[0] && credentials.SecretAccessKey[0]); assert.equal(credentials.Expiration[0], config.STS_DEFAULT_SESSION_TOKEN_EXPIRY_MS); if (config.STS_DEFAULT_SESSION_TOKEN_EXPIRY_MS !== 0) { @@ -399,7 +399,7 @@ function validate_assume_role_response(json, expected_arn, expected_role_id, ass } // validate assumed role user - let assumed_role_user = result.AssumedRoleUser[0]; + const assumed_role_user = result.AssumedRoleUser[0]; assert.equal(assumed_role_user.Arn[0], expected_arn); assert.equal(assumed_role_user.AssumedRoleId[0], expected_role_id); @@ -428,7 +428,7 @@ async function assert_throws_async(promise, } function verify_session_token(session_token, access_key, secret_key, assumed_role_access_key) { - let session_token_json = jwt_utils.authorize_jwt_token(session_token); + const session_token_json = jwt_utils.authorize_jwt_token(session_token); assert.equal(access_key, session_token_json.access_key); assert.equal(secret_key, session_token_json.secret_key); assert.equal(assumed_role_access_key, session_token_json.assumed_role_access_key); @@ -436,17 +436,17 @@ function verify_session_token(session_token, access_key, secret_key, assumed_rol mocha.describe('Session token tests', function() { const { rpc_client } = coretest; - let alice2 = 'alice2'; - let bob2 = 'bob2'; - let charlie2 = 'charlie2'; - let accounts = [{ email: alice2 }, { email: bob2 }, { email: charlie2 }]; + const alice2 = 'alice2'; + const bob2 = 'bob2'; + const charlie2 = 'charlie2'; + const accounts = [{ email: alice2 }, { email: bob2 }, { email: charlie2 }]; let sts_creds; - let role_alice = 'role_alice'; + const role_alice = 'role_alice'; mocha.after(async function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(60000); - for (let account of accounts) { + for (const account of accounts) { await rpc_client.account.delete_account({ email: account.email }); } }); @@ -466,7 +466,7 @@ mocha.describe('Session token tests', function() { }; const account_defaults = { has_login: false, s3_access: true }; - for (let account of accounts) { + for (const account of accounts) { account.access_keys = 
(await rpc_client.account.create_account({ ...account_defaults, name: account.email, @@ -525,17 +525,17 @@ mocha.describe('Session token tests', function() { }); mocha.it('user b assume role of user a - default expiry - list s3 - should be allowed', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_s3_with_session_token = new AWS.S3({ + const temp_s3_with_session_token = new AWS.S3({ ...sts_creds, endpoint: coretest.get_https_address(), accessKeyId: result_obj.access_key, @@ -543,22 +543,22 @@ mocha.describe('Session token tests', function() { sessionToken: result_obj.session_token }); - let buckets1 = await temp_s3_with_session_token.listBuckets().promise(); + const buckets1 = await temp_s3_with_session_token.listBuckets().promise(); assert.ok(buckets1.Buckets.length > 0); }); mocha.it('user b assume role of user a - default expiry - list s3 without session token - should be rejected', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_s3 = new AWS.S3({ + const temp_s3 = new AWS.S3({ ...sts_creds, endpoint: coretest.get_https_address(), accessKeyId: result_obj.access_key, @@ -570,21 +570,21 @@ mocha.describe('Session token tests', function() { }); mocha.it('user b, user c assume role of user a - default expiry - user b list s3 with session token of user c- should be rejected', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json1 = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj1 = validate_assume_role_response(json1, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj1 = validate_assume_role_response(json1, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); const json2 = await assume_role_and_parse_xml(accounts[2].sts, params); - let result_obj2 = validate_assume_role_response(json2, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj2 = validate_assume_role_response(json2, 
`arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_s3 = new AWS.S3({ + const temp_s3 = new AWS.S3({ ...sts_creds, endpoint: coretest.get_https_address(), accessKeyId: result_obj1.access_key, @@ -597,18 +597,18 @@ mocha.describe('Session token tests', function() { }); mocha.it('user b assume role of user a - default expiry - list s3 with permanent creds and temp session token- should be allowed', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); - let user_a_secret = accounts[0].access_keys[0].secret_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_secret = accounts[0].access_keys[0].secret_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_s3_with_session_token = new AWS.S3({ + const temp_s3_with_session_token = new AWS.S3({ ...sts_creds, endpoint: coretest.get_https_address(), accessKeyId: user_a_key, @@ -621,17 +621,17 @@ mocha.describe('Session token tests', function() { }); mocha.it('user b assume role of user a - default expiry - list s3 with faulty temp session token- should be allowed', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_s3_with_session_token = new AWS.S3({ + const temp_s3_with_session_token = new AWS.S3({ ...sts_creds, endpoint: coretest.get_https_address(), accessKeyId: result_obj.access_key, @@ -644,18 +644,18 @@ mocha.describe('Session token tests', function() { }); mocha.it('user b assume role of user a - default expiry - assume role sts with permanent creds and temp session token- should be allowed', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); - let user_a_secret = accounts[0].access_keys[0].secret_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_secret = accounts[0].access_keys[0].secret_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, 
`${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_sts_with_session_token = new AWS.STS({ + const temp_sts_with_session_token = new AWS.STS({ ...sts_creds, endpoint: coretest.get_https_address_sts(), accessKeyId: user_a_key, @@ -668,17 +668,17 @@ mocha.describe('Session token tests', function() { }); mocha.it('user b assume role of user a - default expiry - assume role sts faulty temp session token- should be allowed', async function() { - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_sts_with_session_token = new AWS.STS({ + const temp_sts_with_session_token = new AWS.STS({ ...sts_creds, endpoint: coretest.get_https_address_sts(), accessKeyId: result_obj.access_key, @@ -692,17 +692,17 @@ mocha.describe('Session token tests', function() { mocha.it('user b assume role of user a - expiry 0 - list s3 - should be rejected', async function() { config.STS_DEFAULT_SESSION_TOKEN_EXPIRY_MS = 0; - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_s3_with_session_token = new AWS.S3({ + const temp_s3_with_session_token = new AWS.S3({ ...sts_creds, endpoint: coretest.get_https_address(), accessKeyId: result_obj.access_key, @@ -717,17 +717,17 @@ mocha.describe('Session token tests', function() { mocha.it('user b assume role of user a - expiry 0 - assume role sts - should be rejected', async function() { config.STS_DEFAULT_SESSION_TOKEN_EXPIRY_MS = 0; - let user_a_key = accounts[0].access_keys[0].access_key.unwrap(); + const user_a_key = accounts[0].access_keys[0].access_key.unwrap(); const params = { RoleArn: `arn:aws:sts::${user_a_key}:role/${role_alice}`, RoleSessionName: 'just_a_dummy_session_name' }; const json = await assume_role_and_parse_xml(accounts[1].sts, params); - let result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, + const result_obj = validate_assume_role_response(json, `arn:aws:sts::${user_a_key}:assumed-role/${role_alice}/${params.RoleSessionName}`, `${user_a_key}:${params.RoleSessionName}`, user_a_key); - let temp_sts_with_session_token = new AWS.STS({ + const temp_sts_with_session_token = new AWS.STS({ ...sts_creds, endpoint: coretest.get_https_address_sts(), accessKeyId: result_obj.access_key, @@ -778,7 +778,7 @@ mocha.describe('Assume role policy tests', 
function() { mocha.it('create account with role policy- invalid principal', async function() { const invalid_action = { principal: ['non_existing_email'] }; const email = 'assume_email3'; - let assume_role_policy = { + const assume_role_policy = { ...valid_assume_policy, statement: [{ ...valid_assume_policy.statement[0], @@ -799,7 +799,7 @@ mocha.describe('Assume role policy tests', function() { mocha.it('create account with role policy- invalid effect', async function() { const invalid_action = { effect: 'non_existing_effect' }; const email = 'assume_email3'; - let assume_role_policy = { + const assume_role_policy = { ...valid_assume_policy, statement: [{ ...valid_assume_policy.statement[0], @@ -820,7 +820,7 @@ mocha.describe('Assume role policy tests', function() { mocha.it('create account with role policy - invalid action', async function() { const invalid_action = { action: ['sts:InvalidAssumeRole'] }; const email = 'assume_email3'; - let assume_role_policy = { + const assume_role_policy = { ...valid_assume_policy, statement: [{ ...valid_assume_policy.statement[0], diff --git a/src/test/unit_tests/test_wait_queue.js b/src/test/unit_tests/test_wait_queue.js index d5b2431728..f0dd995c6e 100644 --- a/src/test/unit_tests/test_wait_queue.js +++ b/src/test/unit_tests/test_wait_queue.js @@ -27,7 +27,7 @@ mocha.describe('wait_queue', function() { function do_wake() { woke += 1; } - var item = { + const item = { foo: 'bar', }; return P.fcall(function() { @@ -75,7 +75,7 @@ mocha.describe('wait_queue', function() { function do_wake() { woke += 1; } - var item = { + const item = { foo: 'bar!', }; return P.fcall(function() { diff --git a/src/test/unit_tests/test_zip_utils.js b/src/test/unit_tests/test_zip_utils.js index f0fe7d4c60..c27fc5ce71 100644 --- a/src/test/unit_tests/test_zip_utils.js +++ b/src/test/unit_tests/test_zip_utils.js @@ -14,7 +14,7 @@ const fs_utils = require('../../util/fs_utils'); mocha.describe('zip_utils', function() { - var temp_dir; + let temp_dir; mocha.before(async function() { temp_dir = await fs.promises.mkdtemp('/tmp/test_zip_utils_'); @@ -98,7 +98,7 @@ function check_files(files, files2) { const files_sorted = _.sortBy(files, 'path'); const files2_sorted = _.sortBy(files2.filter(f => !f.path.endsWith('/')), 'path'); const l = Math.max(files_sorted.length, files2_sorted.length); - for (var i = 0; i < l; ++i) { + for (let i = 0; i < l; ++i) { const file = files_sorted[i]; const file2 = files2_sorted[i]; assert.strictEqual(typeof file.path, 'string'); @@ -129,11 +129,11 @@ function random_file_name(len) { * @returns {string} */ function charset_range(range) { - var charset = ''; + let charset = ''; const start = range.charCodeAt(0); const end = range.charCodeAt(2); assert(start <= end); - for (var i = start; i <= end; ++i) { + for (let i = start; i <= end; ++i) { charset += String.fromCharCode(i); } return charset; diff --git a/src/test/unrelated/map_vs_object_benchmark.js b/src/test/unrelated/map_vs_object_benchmark.js index 572752c145..99b9bf22ad 100644 --- a/src/test/unrelated/map_vs_object_benchmark.js +++ b/src/test/unrelated/map_vs_object_benchmark.js @@ -2,7 +2,7 @@ 'use strict'; -var PREFIX; +let PREFIX; PREFIX = 'BoomBaLoomBa'; console.log(''); @@ -26,9 +26,9 @@ test(114467); test(114468); function test(n) { - var i; - var obj = {}; - var map = new Map(); + let i; + const obj = {}; + const map = new Map(); console.log('Testing', n, 'items ...'); compare('[set] ', function() { for (i = 0; i < n; ++i) { @@ -40,13 +40,13 @@ function test(n) { } }); compare('[get] 
', function() { - var sum = 0; + let sum = 0; for (i = 0; i < n; ++i) { sum += map.get(PREFIX + i); } return sum; }, function() { - var sum = 0; + let sum = 0; for (i = 0; i < n; ++i) { sum += obj[PREFIX + i]; } @@ -65,21 +65,21 @@ function test(n) { } function compare(name, func1, func2) { - var sum1 = 0; - var sum2 = 0; - var count = 0; + let sum1 = 0; + let sum2 = 0; + let count = 0; while (sum1 < 200 || sum2 < 200) { - var time1 = Date.now(); + const time1 = Date.now(); func1(); - var time2 = Date.now(); + const time2 = Date.now(); func2(); - var time3 = Date.now(); + const time3 = Date.now(); sum1 += time2 - time1; sum2 += time3 - time2; count += 1; } - var avg1 = sum1 / count; - var avg2 = sum2 / count; + const avg1 = sum1 / count; + const avg2 = sum2 / count; console.log(name, 'MAP is ' + (100 * (avg2 - avg1) / avg2).toFixed(0) + '% faster than OBJ', ' (' + avg1.toFixed(6) + ' ms vs. ' + avg2.toFixed(6) + ' ms)'); diff --git a/src/test/unrelated/measure_bind_perf.js b/src/test/unrelated/measure_bind_perf.js index d37ff5a372..0b61eaa6f9 100644 --- a/src/test/unrelated/measure_bind_perf.js +++ b/src/test/unrelated/measure_bind_perf.js @@ -1,8 +1,8 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var js_utils = require('../util/js_utils'); +const _ = require('lodash'); +const js_utils = require('../util/js_utils'); function Clazz() { /* Clazz? */ } @@ -11,13 +11,13 @@ Clazz.prototype.func = function() { }; Clazz.prototype.measure = function() { - var self = this; - var start = Date.now(); - var now = Date.now(); - var count = 0; - var run = true; + const self = this; + const start = Date.now(); + let now = Date.now(); + let count = 0; + let run = true; while (run) { - for (var i = 0; i < 100000; ++i) { + for (let i = 0; i < 100000; ++i) { if (self.func() !== self) { throw new Error('HUH'); } @@ -34,23 +34,23 @@ Clazz.prototype.measure = function() { }; console.log('\nBIND'); -var binded = new Clazz(); +const binded = new Clazz(); binded.func = binded.func.bind(binded); binded.measure(); console.log('\nLODASH (_.bindAll)'); -var lodasher = new Clazz(); +const lodasher = new Clazz(); _.bindAll(lodasher); binded.measure(); console.log('\nCLOSURE'); -var closure = new Clazz(); +const closure = new Clazz(); closure.func = function() { return Clazz.prototype.func.apply(closure, arguments); }; closure.measure(); console.log('\nSELF BIND (js_utils.self_bind)'); -var selfbind = new Clazz(); +const selfbind = new Clazz(); js_utils.self_bind(selfbind); selfbind.measure(); diff --git a/src/test/unrelated/spawn_lsof_issue_with_cluster.js b/src/test/unrelated/spawn_lsof_issue_with_cluster.js index 068d86d4d8..dcb6537ac1 100644 --- a/src/test/unrelated/spawn_lsof_issue_with_cluster.js +++ b/src/test/unrelated/spawn_lsof_issue_with_cluster.js @@ -1,12 +1,12 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var fs = require('fs'); -var net = require('net'); -var cluster = require('cluster'); -var child_process = require('child_process'); +const fs = require('fs'); +const net = require('net'); +const cluster = require('cluster'); +const child_process = require('child_process'); -var fname = '/tmp/spawn_lsof'; +const fname = '/tmp/spawn_lsof'; if (cluster.isMaster) { fs.unlinkSync(fname); @@ -14,7 +14,7 @@ if (cluster.isMaster) { show_spawn_fds('MASTER BEFORE FORK'); cluster.fork(); - var server = net.createServer(); + const server = net.createServer(); server.listen(function() { console.log('LISTENING ON PORT', server.address().port); }); @@ -28,8 +28,8 @@ if 
(cluster.isMaster) { function show_spawn_fds(who) { console.log(who); - var stdout = fs.openSync(fname, 'a'); - var ret = child_process.spawn('bash', ['-c', 'echo "' + who + '"; lsof -p $$ | grep TCP'], { + const stdout = fs.openSync(fname, 'a'); + const ret = child_process.spawn('bash', ['-c', 'echo "' + who + '"; lsof -p $$ | grep TCP'], { detached: true, stdio: ['ignore', stdout, stdout], cwd: '/tmp' diff --git a/src/test/unrelated/tcp_simultaneous_open.js b/src/test/unrelated/tcp_simultaneous_open.js index 36d1e55f4c..22f7eb8220 100644 --- a/src/test/unrelated/tcp_simultaneous_open.js +++ b/src/test/unrelated/tcp_simultaneous_open.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var net = require('net'); -var tls = require('tls'); +const net = require('net'); +const tls = require('tls'); // var fs = require('fs'); -var delay = 1; +let delay = 1; if (process.argv[1]) { tcp_simultaneous_open(5555, 5556); @@ -16,7 +16,7 @@ if (process.argv[1]) { function tcp_simultaneous_open(local_port, remote_port, attempts) { attempts = attempts || 0; - var conn = net.connect({ + let conn = net.connect({ port: remote_port, localPort: local_port }); @@ -46,14 +46,14 @@ function tcp_simultaneous_open(local_port, remote_port, attempts) { } function tcp_normal_open(listen_port) { - var server = net.createServer(function(conn) { + const server = net.createServer(function(conn) { server.close(); conn.on('readable', function() { on_readable(conn, true); }); }); server.listen(listen_port, function() { - var conn = net.connect(listen_port, function() { + const conn = net.connect(listen_port, function() { conn.write(new_seq_buffer(1)); }); conn.on('readable', function() { @@ -63,7 +63,7 @@ function tcp_normal_open(listen_port) { } function new_seq_buffer(seq) { - var buffer = Buffer.alloc(4); + const buffer = Buffer.alloc(4); buffer.writeInt32BE(seq, 0); return buffer; } @@ -73,11 +73,11 @@ function get_seq_buffer(buffer) { } function on_readable(conn, is_server) { - var run = true; + let run = true; while (run) { - var buffer = conn.read(4); + const buffer = conn.read(4); if (!buffer) return; - var seq = get_seq_buffer(buffer); + const seq = get_seq_buffer(buffer); console.log(is_server ? 
'SERVER:' : 'CLIENT:', seq); if (seq >= 100) { setImmediate(do_upgrade); @@ -97,8 +97,8 @@ function on_readable(conn, is_server) { } function upgrade_to_tls(conn, is_server) { - var looper; - var sconn; + let looper; + let sconn; // conn.removeAllListeners(); if (is_server) { sconn = new tls.TLSSocket(conn, { diff --git a/src/test/utils/agent_functions.js b/src/test/utils/agent_functions.js index 2cd5230f0d..bfe7db9865 100644 --- a/src/test/utils/agent_functions.js +++ b/src/test/utils/agent_functions.js @@ -48,7 +48,7 @@ class AgentFunctions { const listNods = await this.list_nodes(mgmt_ip, mgmt_port_https); for (const node of listNods) { if (node.name.includes(suffix)) { - let name = node.name.split('-noobaa_storage-')[0]; + const name = node.name.split('-noobaa_storage-')[0]; if (!name.startsWith('s3-agent')) { test_nodes_names.push(name); } @@ -108,7 +108,7 @@ class AgentFunctions { await client.create_auth_token(auth_params); const listHosts = await client.host.list_hosts({}); for (const names of listHosts.hosts.filter(node => node.mode === 'DECOMMISSIONED')) { - let params = { + const params = { name: names.name, services: { s3: undefined, @@ -126,7 +126,7 @@ class AgentFunctions { await client.create_auth_token(auth_params); const list_hosts = await client.host.list_hosts({}); for (const names of list_hosts.hosts.filter(node => node.mode === 'OPTIMAL')) { - let params = { + const params = { name: names.name, services: { s3: undefined, diff --git a/src/test/utils/basic_server_ops.js b/src/test/utils/basic_server_ops.js index 7453705690..8f4e347a2b 100644 --- a/src/test/utils/basic_server_ops.js +++ b/src/test/utils/basic_server_ops.js @@ -13,7 +13,7 @@ const api = require('../../api'); const request_promise = util.promisify(request); -var test_file = '/tmp/test_upgrade'; +const test_file = '/tmp/test_upgrade'; let rpc_validation_disabled = false; const ext_regex = /^\.[A-Za-z0-9_]{1,4}$/; @@ -130,8 +130,8 @@ function download_file(ip, path) { } function verify_upload_download(ip, path) { - var orig_md5; - var down_path = path + '_download'; + let orig_md5; + const down_path = path + '_download'; return P.resolve(calc_md5(path)) .then(function(md5) { orig_md5 = md5; @@ -165,9 +165,9 @@ async function generate_random_file(size_mb, extension) { extension = extension || '.dat'; if (!extension.startsWith('.')) extension = '.' + extension; if (!ext_regex.test(extension)) throw new Error('bad extension'); - var suffix = Date.now() + '.' + Math.round(Math.random() * 1000) + extension; - var fname = test_file + suffix; - var dd_cmd; + const suffix = Date.now() + '.' 
+ Math.round(Math.random() * 1000) + extension; + const fname = test_file + suffix; + let dd_cmd; if (process.platform === 'darwin') { dd_cmd = 'dd if=/dev/urandom of=' + fname + ' count=' + size_mb + ' bs=1m'; @@ -180,11 +180,11 @@ async function generate_random_file(size_mb, extension) { } function get_rpc_client(ip) { - let rpc = api.new_rpc(); + const rpc = api.new_rpc(); if (rpc_validation_disabled) { rpc.disable_validation(); } - let client = rpc.new_client({ + const client = rpc.new_client({ address: 'ws://' + ip + ':8080' }); return client; @@ -192,10 +192,10 @@ function get_rpc_client(ip) { function wait_on_agents_upgrade(ip) { const client = get_rpc_client(ip); - var sys_ver; + let sys_ver; return P.fcall(function() { - var auth_params = { + const auth_params = { email: 'demo@noobaa.com', password: 'DeMo1', system: 'demo' @@ -215,8 +215,8 @@ function wait_on_agents_upgrade(ip) { .then(function() { //Loop on list_agents until all agents version was updated //Timeout at 10 minutes - var old_agents = true; - var wait_time = 0; + let old_agents = true; + let wait_time = 0; return P.delay(5000).then(function() { return P.pwhile( function() { diff --git a/src/test/utils/bucket_functions.js b/src/test/utils/bucket_functions.js index 437efb33c5..98cd1438b2 100644 --- a/src/test/utils/bucket_functions.js +++ b/src/test/utils/bucket_functions.js @@ -42,7 +42,7 @@ class BucketFunctions { throw new Error('Both erasure coding and replicas cannot be empty'); } - let chunk_coder_config = {}; + const chunk_coder_config = {}; if (replicas) { chunk_coder_config.replicas = replicas; } else { diff --git a/src/test/utils/cloud_functions.js b/src/test/utils/cloud_functions.js index eda9ecab37..03652d1b7a 100644 --- a/src/test/utils/cloud_functions.js +++ b/src/test/utils/cloud_functions.js @@ -89,8 +89,8 @@ class CloudFunction { throw new Error('Failed to get healthy status'); } else { const system_info = await this._client.system.read_system({}); - let poolIndex = system_info.pools.findIndex(pool => pool.name === 'cloud-resource-aws'); - let status = system_info.pools[poolIndex].mode; + const poolIndex = system_info.pools.findIndex(pool => pool.name === 'cloud-resource-aws'); + const status = system_info.pools[poolIndex].mode; if (system_info.pools[poolIndex].mode === 'OPTIMAL') { console.log('Pool ' + poolName + ' is healthy'); break; diff --git a/src/test/utils/s3ops.js b/src/test/utils/s3ops.js index ff08dff328..7c25510ed7 100644 --- a/src/test/utils/s3ops.js +++ b/src/test/utils/s3ops.js @@ -85,7 +85,7 @@ class S3OPS { }); console.log(`>>> UPLOAD - About to upload object... name: ${file_name}, size: ${actual_size}, bucket: ${bucket}`); - let start_ts = Date.now(); + const start_ts = Date.now(); await this.s3.putObject({ Bucket: bucket, Key: file_name, @@ -107,7 +107,7 @@ class S3OPS { try { destination_bucket = destination_bucket || 'first.bucket'; - let params = { + const params = { Bucket: destination_bucket, CopySource: source_bucket + '/' + source + (versionid ? `?versionId=${versionid}` : ''), Key: destination, @@ -115,7 +115,7 @@ class S3OPS { }; const psource = source + (versionid ? ' v' + versionid : ''); console.log('>>> SS COPY - About to copy object... 
from: ' + psource + ' to: ' + destination); - let start_ts = Date.now(); + const start_ts = Date.now(); await this.s3.copyObject(params).promise(); const file_md5 = verify_md5_map.get(`${this.system_verify_name}/${source_bucket}/${source}`); verify_md5_map.set(`${this.system_verify_name}/${destination_bucket}/${destination}`, file_md5); @@ -209,10 +209,9 @@ class S3OPS { VersionId: versionid ? versionid : undefined, }).promise(); let start_byte = 0; - let file_md5; - let md5 = crypto.createHash('md5'); + const md5 = crypto.createHash('md5'); // file_md5 = res.Metadata.md5; - file_md5 = verify_md5_map.get(`${this.system_verify_name}/${bucket}/${file_name}`); + const file_md5 = verify_md5_map.get(`${this.system_verify_name}/${bucket}/${file_name}`); const file_size = res.ContentLength; const jump = Math.floor(file_size / parts); let finish_byte = start_byte + jump; @@ -240,7 +239,7 @@ class S3OPS { async check_MD5_all_objects(bucket, prefix) { - let params = { + const params = { Bucket: bucket, Prefix: prefix, }; @@ -262,7 +261,7 @@ class S3OPS { if (list.length === 0) { throw new Error('No files with prefix in bucket'); } - let rand = Math.floor(Math.random() * list.length); + const rand = Math.floor(Math.random() * list.length); return list[rand]; } catch (err) { this.log_error(`get_a_random_file:: listObjects - Bucket: ${bucket}, Prefix: ${prefix} failed!`, err); @@ -279,7 +278,7 @@ class S3OPS { if (list.length === 0) { throw new Error('No files with prefix in bucket'); } - let rand = Math.floor(Math.random() * list.length); //Take a random version , not delete marker and return key and versionid + const rand = Math.floor(Math.random() * list.length); //Take a random version , not delete marker and return key and versionid return list[rand]; } catch (err) { this.log_error(`get_a_random_file:: listObjectVersions - Bucket: ${bucket}, Prefix: ${prefix} failed!`, err); @@ -291,13 +290,13 @@ class S3OPS { const suppress_logs = param.suppress_logs; const MaxKeys = param.maxKeys; let ops = 'listObjects'; - let params = { + const params = { Bucket: bucket, Prefix: prefix, MaxKeys, }; let list = []; - let listFiles = []; + const listFiles = []; if (param.version) { ops = 'listObjectVersions'; } @@ -330,7 +329,7 @@ class S3OPS { } async get_list_multipart_uploads(bucket) { - let listFiles = []; + const listFiles = []; try { const { Uploads: list } = await this.s3.listMultipartUploads({ Bucket: bucket }).promise(); if (list.length === 0) { @@ -358,7 +357,7 @@ class S3OPS { UploadIdMarker: uploadIdMarker }; let list = []; - let listFiles = []; + const listFiles = []; try { const listMultipartUploads = await this.s3.listMultipartUploads(params).promise(); console.log(JSON.stringify(listMultipartUploads)); @@ -379,7 +378,7 @@ class S3OPS { } async get_list_prefixes(bucket) { - let listPrefixes = []; + const listPrefixes = []; try { const { CommonPrefixes: list } = await this.s3.listObjects({ Bucket: bucket, @@ -415,7 +414,7 @@ class S3OPS { async delete_file(bucket, file_name, versionid) { try { - let start_ts = Date.now(); + const start_ts = Date.now(); const psource = file_name + (versionid ? 'v' + versionid : ''); console.log('>>> DELETE - About to delete object...' 
+ psource); await this.s3.deleteObject({ @@ -440,7 +439,7 @@ class S3OPS { }; for (const file of files) { - let item = { + const item = { Key: file.filename, }; if (file.versionid) { @@ -473,7 +472,7 @@ class S3OPS { run_list = false; } await this.delete_multiple_files(bucket, _.map(list, item => { - let i = { filename: item.Key }; + const i = { filename: item.Key }; if (item.VersionId) { i.versionid = item.VersionId; } @@ -545,7 +544,7 @@ class S3OPS { } async get_list_buckets(print_error = true) { - let listBuckets = []; + const listBuckets = []; try { const buckets = await this.s3.listBuckets({}).promise(); const list = buckets.Buckets; diff --git a/src/test/utils/server_functions.js b/src/test/utils/server_functions.js index 9388ea7064..0e02b37551 100644 --- a/src/test/utils/server_functions.js +++ b/src/test/utils/server_functions.js @@ -5,7 +5,7 @@ const api = require('../../api'); const Report = require('../framework/report'); const P = require('../../util/promise'); -let report = new Report(); +const report = new Report(); //Enable reporter and set parameters function init_reporter(report_params) { diff --git a/src/tools/coding_speed.js b/src/tools/coding_speed.js index ac3e0d21eb..42ae6b2693 100644 --- a/src/tools/coding_speed.js +++ b/src/tools/coding_speed.js @@ -104,8 +104,8 @@ function main() { verbose: argv.verbose, }); - var total_size = 0; - var num_parts = 0; + let total_size = 0; + let num_parts = 0; const reporter = new stream.Transform({ objectMode: true, allowHalfOpen: false, @@ -149,7 +149,7 @@ function main() { }) .catch(err => { if (!err.chunks) throw err; - var message = ''; + let message = ''; for (const chunk of err.chunks) { message += 'CHUNK ERRORS: ' + chunk.errors.join(',') + '\n'; } diff --git a/src/tools/cpu_speed.js b/src/tools/cpu_speed.js index d70d30d92e..7d322bf6de 100644 --- a/src/tools/cpu_speed.js +++ b/src/tools/cpu_speed.js @@ -24,7 +24,7 @@ function main() { const hasher = crypto.createHash(argv.hash); const buf = crypto.randomBytes(1024 * 1024); const speedometer = new Speedometer('CPU Speed'); - var size = argv.size * 1024 * 1024; + let size = argv.size * 1024 * 1024; console.log(`Crunching ${argv.size} MB with ${argv.hash}...`); run(); diff --git a/src/tools/events_generator.js b/src/tools/events_generator.js index 8524705b6b..b7053ad429 100644 --- a/src/tools/events_generator.js +++ b/src/tools/events_generator.js @@ -1,7 +1,7 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var dotenv = require('../util/dotenv'); +const dotenv = require('../util/dotenv'); dotenv.load(); const _ = require('lodash'); @@ -91,7 +91,7 @@ let has_objects = false; const events = new EventsGenerator(); -let entities = { +const entities = { bucket: { _id: '' }, @@ -199,7 +199,7 @@ EventsGenerator.prototype.generate_alerts = function(num, pri) { }; EventsGenerator.prototype.generate_audit = function(num, cat) { - let events_pool = []; + const events_pool = []; if (cat === 'ALL') { _.map(_.keys(EXISTING_AUDIT_LOGS), c => { //Skip nodes / objects if no entities exist @@ -211,7 +211,7 @@ EventsGenerator.prototype.generate_audit = function(num, cat) { return; } //Update entity ID/name for later audit generation - let ent = events._get_entity(c); + const ent = events._get_entity(c); events_pool.concat(_.map(EXISTING_AUDIT_LOGS[c], function(ev) { events_pool.push(_.defaults({ level: 'info', @@ -227,7 +227,7 @@ EventsGenerator.prototype.generate_audit = function(num, cat) { events.print_usage(); } _.map(EXISTING_AUDIT_LOGS[cat], function(ev) { - let ent = 
events._get_entity(cat); + const ent = events._get_entity(cat); events_pool.push(_.defaults({ level: 'info', event: cat + '.' + ev, diff --git a/src/tools/gridfs_stress.js b/src/tools/gridfs_stress.js index 960aee28ee..350c69d8d9 100644 --- a/src/tools/gridfs_stress.js +++ b/src/tools/gridfs_stress.js @@ -3,7 +3,7 @@ const mongodb = require('mongodb'); -var global_id = 0; +let global_id = 0; const WRITE_SIZE = 4 * 1024 * 1024; const COLL = 'gridfs_stress'; const CHUNKS_COLL = `${COLL}.chunks`; diff --git a/src/tools/http_speed.js b/src/tools/http_speed.js index 056a0e2ae5..2fe620f6af 100644 --- a/src/tools/http_speed.js +++ b/src/tools/http_speed.js @@ -180,14 +180,14 @@ function run_client_request() { function run_sender(writable) { const req_size = size_bytes; const buf_size = argv.buf; - var n = 0; + let n = 0; writable.on('drain', send); send(); function send() { const buf = Buffer.allocUnsafe(Math.min(buf_size, req_size - n)); - var ok = true; + let ok = true; while (ok && n < req_size) { ok = writable.write(buf); n += buf.length; diff --git a/src/tools/md_blow.js b/src/tools/md_blow.js index 46fc46e683..6a39005b3c 100644 --- a/src/tools/md_blow.js +++ b/src/tools/md_blow.js @@ -65,7 +65,7 @@ async function blow_object(index) { const create_reply = await client.object.create_object_upload(params); params.obj_id = create_reply.obj_id; await blow_parts(params); - let complete_params = _.pick(params, 'bucket', 'key', 'size', 'obj_id'); + const complete_params = _.pick(params, 'bucket', 'key', 'size', 'obj_id'); complete_params.etag = 'bla'; dbg.log0('complete_object_upload', params.key); await client.object.complete_object_upload(complete_params); diff --git a/src/tools/mem_grabber.js b/src/tools/mem_grabber.js index 52da195f19..8641d1cc7a 100644 --- a/src/tools/mem_grabber.js +++ b/src/tools/mem_grabber.js @@ -4,12 +4,12 @@ const crypto = require('crypto'); const argv = require('minimist')(process.argv); const size = argv.size_mb; -var cipher = crypto.createCipheriv('aes-128-gcm', crypto.randomBytes(16), crypto.randomBytes(12)); -var zero_buf = Buffer.alloc(1024 * 1024); +const cipher = crypto.createCipheriv('aes-128-gcm', crypto.randomBytes(16), crypto.randomBytes(12)); +const zero_buf = Buffer.alloc(1024 * 1024); -let arr = []; +const arr = []; for (let index = 0; index < size; index++) { - var buffer = cipher.update(zero_buf); + const buffer = cipher.update(zero_buf); arr.push(buffer); } diff --git a/src/tools/mongo_profiler.js b/src/tools/mongo_profiler.js index f2e601f090..5fb4f07c73 100644 --- a/src/tools/mongo_profiler.js +++ b/src/tools/mongo_profiler.js @@ -17,30 +17,30 @@ // > mongo nbcore src/tools/mongo_profiler.js // -var res = {}; +const res = {}; db.system.profile.find({ ns: { $ne: 'nbcore.system.profile' } }, {}).forEach(function(p) { - var col = p.ns.split('.')[1]; - var key = col + '.' + p.op; - var info = res[key] || { items: [] }; + const col = p.ns.split('.')[1]; + const key = col + '.' 
+ p.op; + const info = res[key] || { items: [] }; res[key] = info; info.items.push(p); }); -for (var key of Object.keys(res)) { - var info = res[key]; - var items = info.items; +for (const key of Object.keys(res)) { + const info = res[key]; + const items = info.items; items.sort(function(a, b) { return a.millis - b.millis; }); - var count = items.length; - var min = info.items[0]; - var med = items[Math.floor(count * 0.5)]; - var p90 = items[Math.floor(count * 0.9)]; - var max = items[count - 1]; + const count = items.length; + const min = info.items[0]; + const med = items[Math.floor(count * 0.5)]; + const p90 = items[Math.floor(count * 0.9)]; + const max = items[count - 1]; print(); print('profile:', key); print(' count:', count); @@ -53,21 +53,21 @@ for (var key of Object.keys(res)) { function profify(p, sep) { sep = sep || ''; - var max_line = 300; - var s = ''; - var keys = Object.keys(p); - var order = ['millis', 'op', 'ns', 'ts', '*', 'locks', 'command', 'query', 'execStats']; - var omit = ['user', 'allUsers', 'client', 'protocol']; + const max_line = 300; + let s = ''; + const keys = Object.keys(p); + const order = ['millis', 'op', 'ns', 'ts', '*', 'locks', 'command', 'query', 'execStats']; + const omit = ['user', 'allUsers', 'client', 'protocol']; keys.sort(function(a, b) { - var ao = (order.indexOf(a) + 1) || (order.indexOf('*') + 1); - var bo = (order.indexOf(b) + 1) || (order.indexOf('*') + 1); + const ao = (order.indexOf(a) + 1) || (order.indexOf('*') + 1); + const bo = (order.indexOf(b) + 1) || (order.indexOf('*') + 1); return ao - bo; }); - for (var k of keys) { + for (const k of keys) { if (omit.indexOf(k) >= 0) continue; if (k === 'command' && p[k].map) p[k].map = p[k].map.slice(0, p[k].map.indexOf('{')); if (k === 'command' && p[k].reduce) p[k].reduce = p[k].reduce.slice(0, p[k].reduce.indexOf('{')); - var v = JSON.stringify(p[k]) || ''; + let v = JSON.stringify(p[k]) || ''; if (v.length > max_line) { v = v.slice(0, max_line) + ' (truncated)'; } diff --git a/src/tools/mongodb_blow.js b/src/tools/mongodb_blow.js index 81994a8a6f..584c8c46cc 100644 --- a/src/tools/mongodb_blow.js +++ b/src/tools/mongodb_blow.js @@ -10,7 +10,7 @@ * */ function random_hex_char() { - let hexchars = "0123456789abcdef"; + const hexchars = "0123456789abcdef"; return hexchars[Math.floor(_rand() * 16)]; } @@ -22,13 +22,13 @@ function random_hex_string(n) { return s; } -let system = db.systems.findOne()._id; -let bucket = db.buckets.findOne()._id; +const system = db.systems.findOne()._id; +const bucket = db.buckets.findOne()._id; for (let j = 0; j < 10000; ++j) { - let array_of_chunks = []; + const array_of_chunks = []; for (let i = 0; i < 1000; ++i) { - let digest_b64 = new HexData(0, random_hex_string(96)).base64(); + const digest_b64 = new HexData(0, random_hex_string(96)).base64(); array_of_chunks.push({ _id: new ObjectId(), system, diff --git a/src/tools/mongodb_bucket_blow.js b/src/tools/mongodb_bucket_blow.js index c51cd0ed36..93aa0c464f 100644 --- a/src/tools/mongodb_bucket_blow.js +++ b/src/tools/mongodb_bucket_blow.js @@ -9,19 +9,19 @@ * */ -let system_id = db.systems.findOne()._id; -let pool_id = db.pools.findOne({ resource_type: { $ne: "INTERNAL" } })._id; -let ccc = db.chunk_configs.findOne()._id; -let now = Date.now(); +const system_id = db.systems.findOne()._id; +const pool_id = db.pools.findOne({ resource_type: { $ne: "INTERNAL" } })._id; +const ccc = db.chunk_configs.findOne()._id; +const now = Date.now(); for (let j = 0; j < 5; ++j) { - let array_of_tiers = []; - let 
array_of_policies = []; - let array_of_buckets = []; + const array_of_tiers = []; + const array_of_policies = []; + const array_of_buckets = []; for (let i = 0; i < 1000; ++i) { - let tier_id = new ObjectId(); - let policy_id = new ObjectId(); - let bucket_id = new ObjectId(); + const tier_id = new ObjectId(); + const policy_id = new ObjectId(); + const bucket_id = new ObjectId(); array_of_tiers.push({ _id: tier_id, name: 'tier' + ((j * 1000) + i), diff --git a/src/tools/nbcat.js b/src/tools/nbcat.js index fe4ed9af2f..a9f59d445b 100644 --- a/src/tools/nbcat.js +++ b/src/tools/nbcat.js @@ -1,21 +1,21 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var moment = require('moment'); -var size_utils = require('../util/size_utils'); -var api = require('../api'); -var ObjectIO = require('../sdk/object_io'); -var dbg = require('../util/debug_module')(__filename); +const moment = require('moment'); +const size_utils = require('../util/size_utils'); +const api = require('../api'); +const ObjectIO = require('../sdk/object_io'); +const dbg = require('../util/debug_module')(__filename); dbg.set_module_level(5); -var bkt = process.argv[2]; -var key = process.argv[3]; -var start = Number(process.argv[4]) || 0; -var end = Number(process.argv[5]) || Infinity; -var output = process.stdout; -var rpc = api.new_rpc(); -var client = rpc.new_client(); -var object_io = new ObjectIO(); +const bkt = process.argv[2]; +const key = process.argv[3]; +const start = Number(process.argv[4]) || 0; +const end = Number(process.argv[5]) || Infinity; +const output = process.stdout; +const rpc = api.new_rpc(); +const client = rpc.new_client(); +const object_io = new ObjectIO(); if (!bkt) { init_api().then(function() { diff --git a/src/tools/ntcp_speed.js b/src/tools/ntcp_speed.js index d805bbfccd..ee6ccd7a70 100644 --- a/src/tools/ntcp_speed.js +++ b/src/tools/ntcp_speed.js @@ -1,12 +1,12 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -let Ntcp = require('../util/nb_native')().Ntcp; -let Speedometer = require('../util/speedometer'); -let argv = require('minimist')(process.argv); +const Ntcp = require('../util/nb_native')().Ntcp; +const Speedometer = require('../util/speedometer'); +const argv = require('minimist')(process.argv); argv.size = argv.size || 1024 * 1024; argv.port = Number(argv.port) || 50505; -let g_servers = []; -let g_connections = []; +const g_servers = []; +const g_connections = []; main(); function main() { @@ -30,7 +30,7 @@ function usage() { function run_server(port) { console.log('SERVER', port, 'size', argv.size); - let server = new Ntcp(); + const server = new Ntcp(); g_servers.push(server); server.on('connection', conn => { setup_conn(conn); @@ -48,7 +48,7 @@ function run_server(port) { function run_client(port, host) { console.log('CLIENT', host + ':' + port, 'size', argv.size); - let conn = new Ntcp(); + const conn = new Ntcp(); conn.connect(port, host, () => run_sender(conn)); setup_conn(conn); } @@ -66,11 +66,11 @@ function setup_conn(conn) { function run_sender(conn) { console.log('client connected'); - let send_speedometer = new Speedometer('Send Speed'); + const send_speedometer = new Speedometer('Send Speed'); send(); function send() { - let buf = Buffer.allocUnsafe(argv.size); + const buf = Buffer.allocUnsafe(argv.size); conn.write(buf, () => { send_speedometer.update(buf.length); setImmediate(send); @@ -79,6 +79,6 @@ function run_sender(conn) { } function run_receiver(conn) { - let recv_speedometer = new Speedometer('Receive Speed'); + const recv_speedometer = new Speedometer('Receive 
Speed'); conn.on('message', data => recv_speedometer.update(data.length)); } diff --git a/src/tools/rpc_shell.js b/src/tools/rpc_shell.js index 0d819071a4..0ff6919afb 100644 --- a/src/tools/rpc_shell.js +++ b/src/tools/rpc_shell.js @@ -1,18 +1,18 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var dotenv = require('../util/dotenv'); +const dotenv = require('../util/dotenv'); dotenv.load(); -var _ = require('lodash'); -var repl = require('repl'); -var util = require('util'); -var api = require('../api'); -var P = require('../util/promise'); -var argv = require('minimist')(process.argv); +const _ = require('lodash'); +const repl = require('repl'); +const util = require('util'); +const api = require('../api'); +const P = require('../util/promise'); +const argv = require('minimist')(process.argv); -var repl_srv; -var rpcshell = new RPCShell(); +let repl_srv; +const rpcshell = new RPCShell(); argv.email = argv.email || 'demo@noobaa.com'; argv.password = argv.password || 'DeMo1'; @@ -25,7 +25,7 @@ function RPCShell() { } function construct_rpc_arguments(str_args) { - var ret_json; + let ret_json; try { ret_json = JSON.parse(str_args); } catch (err) { @@ -37,9 +37,9 @@ function construct_rpc_arguments(str_args) { //Construct a map of different API topics and their functions RPCShell.prototype.init = function() { - var self = this; + const self = this; this.APIs = {}; - var ignore_keys = [ + const ignore_keys = [ 'options', 'common', 'create_auth_token', @@ -47,7 +47,7 @@ RPCShell.prototype.init = function() { ]; return P.fcall(function() { - var auth_params = { + const auth_params = { email: argv.email, password: argv.password, system: argv.system @@ -73,7 +73,7 @@ RPCShell.prototype.list = function() { //List of commands RPCShell.prototype.list_functions = function() { - var list_str = '\nAvailable commands are:\n' + + let list_str = '\nAvailable commands are:\n' + ' .list - show available API\n' + ' .show - show all functions under a specific API\n' + ' .call [args] - invokes the RPC call API.FUNC and passes args as arguments\n' + @@ -110,9 +110,9 @@ RPCShell.prototype.show = function(apiname) { }; RPCShell.prototype.call = function(str_args) { - var args = []; - var params; - var self = this; + let args = []; + let params; + const self = this; if (argv.run) { args[0] = argv.api; args[1] = argv.func; @@ -138,7 +138,7 @@ RPCShell.prototype.call = function(str_args) { if (!args[1]) { console.warn('Function name not supplied for', args[0]); } - var func_ind = _.indexOf(this.APIs[args[0]], args[1]); + const func_ind = _.indexOf(this.APIs[args[0]], args[1]); if (func_ind === -1) { console.log(args[1], 'Function does not exist for', args[0]); if (argv.run) { @@ -147,9 +147,9 @@ RPCShell.prototype.call = function(str_args) { return; } - var apiname = args[0]; - var func = this.APIs[args[0]][func_ind]; - var rpc_args = construct_rpc_arguments(params); + const apiname = args[0]; + const func = this.APIs[args[0]][func_ind]; + const rpc_args = construct_rpc_arguments(params); if (rpc_args === null) { console.error('Invalid JSON String', params); if (argv.run) { @@ -185,8 +185,8 @@ RPCShell.prototype.call = function(str_args) { }; RPCShell.prototype.params = function(str_args) { - var args = []; - var self = this; + let args = []; + const self = this; if (argv.run) { args[0] = argv.api; @@ -211,7 +211,7 @@ RPCShell.prototype.params = function(str_args) { if (!args[1]) { console.warn('Function name not supplied for', args[0]); } - var func_ind = _.indexOf(this.APIs[args[0]], args[1]); + const func_ind = 
_.indexOf(this.APIs[args[0]], args[1]); if (func_ind === -1) { console.log(args[1], 'Function does not exist for', args[0]); if (argv.run) { @@ -220,8 +220,8 @@ RPCShell.prototype.params = function(str_args) { return; } - var apiname = args[0]; - var func = this.APIs[args[0]][func_ind]; + const apiname = args[0]; + const func = this.APIs[args[0]][func_ind]; return P.fcall(function() { console.log(`Parameters of ${apiname}.${func} are:`, self.rpc.schema[apiname + '_api'].methods[func].params); if (!argv.run) { @@ -254,7 +254,7 @@ function main() { //Bind RPCshell functions to repl _.forIn(rpcshell, function(val, key) { if (typeof(val) === 'function') { - var action = val.bind(rpcshell); + const action = val.bind(rpcshell); repl_srv.defineCommand(key, { action: action }); diff --git a/src/tools/s3cat.js b/src/tools/s3cat.js index 67a6df2e0c..243936ff78 100644 --- a/src/tools/s3cat.js +++ b/src/tools/s3cat.js @@ -121,11 +121,11 @@ async function list_objects() { console.log('Prefix:', prefix.Prefix); } for (const obj of res.Contents) { - let key = obj.Key; + const key = obj.Key; let size = size_utils.human_size(obj.Size); size = ' '.slice(size.length) + size; - let mtime = moment(new Date(obj.LastModified)).format('MMM DD HH:mm'); - let owner = (obj.Owner && (obj.Owner.DisplayName || obj.Owner.ID)) || '?'; + const mtime = moment(new Date(obj.LastModified)).format('MMM DD HH:mm'); + const owner = (obj.Owner && (obj.Owner.DisplayName || obj.Owner.ID)) || '?'; if (argv.ll) { console.log(owner, size, mtime, key, JSON.stringify(_.omit(obj, 'Key', 'Size', 'Owner', 'LastModified'))); @@ -162,11 +162,11 @@ async function list_objects_v2() { console.log('Prefix:', prefix.Prefix); } for (const obj of res.Contents) { - let key = obj.Key; + const key = obj.Key; let size = size_utils.human_size(obj.Size); size = ' '.slice(size.length) + size; - let mtime = moment(new Date(obj.LastModified)).format('MMM DD HH:mm'); - let owner = (obj.Owner && (obj.Owner.DisplayName || obj.Owner.ID)) || '?'; + const mtime = moment(new Date(obj.LastModified)).format('MMM DD HH:mm'); + const owner = (obj.Owner && (obj.Owner.DisplayName || obj.Owner.ID)) || '?'; if (argv.ll_v2) { console.log(owner, size, mtime, key, JSON.stringify(_.omit(obj, 'Key', 'Size', 'Owner', 'LastModified'))); @@ -234,7 +234,7 @@ function delete_objects() { } function upload_object() { - let file_path = argv.file || ''; + const file_path = argv.file || ''; let upload_key = (_.isString(argv.upload) && argv.upload) || (_.isString(argv.put) && argv.put) || @@ -244,7 +244,7 @@ function upload_object() { argv.part_size = argv.part_size || 32; let data_source; let data_size; - let part_size = argv.part_size * 1024 * 1024; + const part_size = argv.part_size * 1024 * 1024; if (file_path) { upload_key = upload_key || file_path + '-' + Date.now().toString(36); data_source = fs.createReadStream(file_path, { @@ -279,9 +279,9 @@ function upload_object() { console.error('UPLOAD ERROR:', err); return; } - let end_time = Date.now(); - let total_seconds = (end_time - start_time) / 1000; - let speed_str = (data_size / total_seconds / 1024 / 1024).toFixed(0); + const end_time = Date.now(); + const total_seconds = (end_time - start_time) / 1000; + const speed_str = (data_size / total_seconds / 1024 / 1024).toFixed(0); console.log('upload done.', speed_str, 'MB/sec'); } @@ -324,7 +324,7 @@ function upload_object() { } if (argv.perf) { - let progress = { + const progress = { loaded: 0 }; s3.createMultipartUpload({ @@ -371,8 +371,8 @@ function upload_object() { 
data_source.pause(); } //console.log('uploadPart'); - let data_start_time = Date.now(); - let part_num = next_part_num; + const data_start_time = Date.now(); + const part_num = next_part_num; s3.uploadPart({ Bucket: argv.bucket, Key: upload_key, @@ -386,7 +386,7 @@ function upload_object() { console.error('s3.uploadPart ERROR', err2); return; } - let took = Date.now() - data_start_time; + const took = Date.now() - data_start_time; // console.log('Part', part_num, 'Took', took, 'ms'); latency_avg += took; data_source.resume(); @@ -431,9 +431,9 @@ function get_object() { console.error('GET ERROR:', err2); return; } - let end_time = Date.now(); - let total_seconds = (end_time - start_time) / 1000; - let speed_str = (data_size / total_seconds / 1024 / 1024).toFixed(0); + const end_time = Date.now(); + const total_seconds = (end_time - start_time) / 1000; + const speed_str = (data_size / total_seconds / 1024 / 1024).toFixed(0); console.log('get done.', speed_str, 'MB/sec'); } diff --git a/src/tools/s3perf.js b/src/tools/s3perf.js index 80d7ad49a5..17cb9983b5 100644 --- a/src/tools/s3perf.js +++ b/src/tools/s3perf.js @@ -35,14 +35,14 @@ if (argv.upload && data_size < argv.part_size * 1024 * 1024) { const start_time = Date.now(); -var op_lat_sum = 0; -var op_count = 0; -var op_size = 0; -var last_reported = start_time; -var last_op_count = 0; -var last_op_lat_sum = 0; +let op_lat_sum = 0; +let op_count = 0; +let op_size = 0; +let last_reported = start_time; +let last_op_count = 0; +let last_op_lat_sum = 0; -var op_func; +let op_func; if (argv.help) { print_usage(); @@ -204,7 +204,7 @@ async function run_worker_loop() { } } -let _object_keys = []; +const _object_keys = []; let _object_keys_next = 0; let _object_keys_done = false; let _object_keys_promise = null; diff --git a/src/tools/s3select.js b/src/tools/s3select.js index eac446ee40..50ed632c2c 100644 --- a/src/tools/s3select.js +++ b/src/tools/s3select.js @@ -38,7 +38,7 @@ class S3SelectStream extends Transform { async _transform(chunk, encoding, cb) { //console.log("got chunk ", chunk.length); - let res = await this.s3select.write(chunk); + const res = await this.s3select.write(chunk); if (res) { this.push(res.select); } @@ -47,7 +47,7 @@ class S3SelectStream extends Transform { async _flush(cb) { //console.log("in flush"); - let res = await this.s3select.flush(); + const res = await this.s3select.flush(); if (res) { this.push(res.select); } diff --git a/src/tools/stun_server.js b/src/tools/stun_server.js index 9b64542283..1063fbdffa 100644 --- a/src/tools/stun_server.js +++ b/src/tools/stun_server.js @@ -1,12 +1,12 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -let dgram = require('dgram'); -let stun = require('../rpc/stun'); -let argv = require('minimist')(process.argv); +const dgram = require('dgram'); +const stun = require('../rpc/stun'); +const argv = require('minimist')(process.argv); argv.port = argv.port || 3478; -let socket = dgram.createSocket('udp4'); +const socket = dgram.createSocket('udp4'); socket.on('message', on_message); socket.on('listening', on_listening); socket.bind(argv.port); @@ -21,9 +21,9 @@ function on_message(buffer, rinfo) { return; } console.log('STUN', stun.get_method_name(buffer), 'from', rinfo.address + ':' + rinfo.port); - var method = stun.get_method_field(buffer); + const method = stun.get_method_field(buffer); if (method === stun.METHODS.REQUEST) { - var reply = stun.new_packet(stun.METHODS.SUCCESS, [{ + const reply = stun.new_packet(stun.METHODS.SUCCESS, [{ type: stun.ATTRS.XOR_MAPPED_ADDRESS, value: 
{ family: 'IPv4', diff --git a/src/upgrade/upgrade_manager.js b/src/upgrade/upgrade_manager.js index 907801ed05..762855fde3 100644 --- a/src/upgrade/upgrade_manager.js +++ b/src/upgrade/upgrade_manager.js @@ -96,7 +96,7 @@ async function load_required_scripts(server_version, container_version) { .sort(version_compare); dbg.log0(`found the following versions with upgrade scripts which are newer than server version (${server_version}):`, newer_versions); // get all scripts under new_versions - let upgrade_scripts = _.flatMap(newer_versions, ver => { + const upgrade_scripts = _.flatMap(newer_versions, ver => { const full_path = path.join(upgrade_scripts_dir, ver); const scripts = fs.readdirSync(full_path); return scripts.map(script => path.join(full_path, script)); @@ -127,7 +127,7 @@ async function run_upgrade() { let exit_code = 0; const container_version = pkg.version; - let server_version = _.get(system_store, 'data.systems.0.current_version'); + const server_version = _.get(system_store, 'data.systems.0.current_version'); let current_version = server_version; if (should_upgrade(server_version, container_version)) { const this_upgrade = { @@ -136,7 +136,7 @@ async function run_upgrade() { from_version: server_version, to_version: container_version }; - let upgrade_history = system_store.data.systems[0].upgrade_history; + const upgrade_history = system_store.data.systems[0].upgrade_history; try { const upgrade_scripts = await load_required_scripts(server_version, container_version); for (const script of upgrade_scripts) { diff --git a/src/util/NetStorageKit-Node-master/lib/api-auth.js b/src/util/NetStorageKit-Node-master/lib/api-auth.js index 1c7e5e2c8a..aa5e4fe0c2 100755 --- a/src/util/NetStorageKit-Node-master/lib/api-auth.js +++ b/src/util/NetStorageKit-Node-master/lib/api-auth.js @@ -15,8 +15,8 @@ class Auth { } auth(netstoragePath, actionHeaders) { - var acs_auth_data = ''; - var acs_auth_sign = ''; + let acs_auth_data = ''; + let acs_auth_sign = ''; try { acs_auth_data = `5, 0.0.0.0, 0.0.0.0, ${Math.floor(Date.now() / 1000)}, ${Math.floor((Math.random() * 100000))}, ${this.opts.keyName}`; diff --git a/src/util/NetStorageKit-Node-master/lib/api-request-parser.js b/src/util/NetStorageKit-Node-master/lib/api-request-parser.js index b4effc44ca..de1ee16672 100755 --- a/src/util/NetStorageKit-Node-master/lib/api-request-parser.js +++ b/src/util/NetStorageKit-Node-master/lib/api-request-parser.js @@ -16,7 +16,7 @@ class Parser { } const parsedResults = _.mergeWith({}, results, function(a, b) { - var obj = {}; + const obj = {}; Object.keys(b).forEach(function(key) { if (key === '$') { obj.attribs = b[key]; diff --git a/src/util/NetStorageKit-Node-master/lib/api-request.js b/src/util/NetStorageKit-Node-master/lib/api-request.js index 0bb3cd9b68..353492be57 100755 --- a/src/util/NetStorageKit-Node-master/lib/api-request.js +++ b/src/util/NetStorageKit-Node-master/lib/api-request.js @@ -27,7 +27,7 @@ class Requestor { const netstoragePath = this.validatePath(requestArgs.path); const authData = this.auth.auth(netstoragePath, acs_action); - var options = { + const options = { method: requestArgs.method, host: this.requestorOptions.hostname, path: netstoragePath, @@ -47,7 +47,7 @@ class Requestor { if (requestArgs.action === 'download' && ok) { return callback(null, res); } else { - var buffers = []; + const buffers = []; res.on('data', data => buffers.push(data)) .on('end', () => { res.body = Buffer.concat(buffers).toString('binary'); diff --git 
a/src/util/NetStorageKit-Node-master/lib/netstorage.js b/src/util/NetStorageKit-Node-master/lib/netstorage.js index e1aae17022..01d99f3951 100755 --- a/src/util/NetStorageKit-Node-master/lib/netstorage.js +++ b/src/util/NetStorageKit-Node-master/lib/netstorage.js @@ -161,9 +161,9 @@ class Netstorage { } buildRequestOptions(vmethod, vopts) { - var method = vmethod; - var opts = vopts; - var baseActions = `${method}&format=xml`; + const method = vmethod; + const opts = vopts; + const baseActions = `${method}&format=xml`; if (typeof opts === 'object') { if (opts.path) { if (opts.actions instanceof Object && Object.keys(opts.actions).length > 0) { @@ -190,8 +190,8 @@ class Netstorage { } buildRequestActions(vactions) { - var actions = vactions; - var parsedActions = ''; + const actions = vactions; + let parsedActions = ''; Object.keys(actions).forEach(action => { parsedActions += `&${action}=${actions[action]}`; }); diff --git a/src/util/addr_utils.js b/src/util/addr_utils.js index bfdb6a79e2..c59bade597 100644 --- a/src/util/addr_utils.js +++ b/src/util/addr_utils.js @@ -26,7 +26,7 @@ function get_base_address(address_list, options = {}) { addr.api === api ); - let default_port = api_default_ports[api]; + const default_port = api_default_ports[api]; if (hint === 'EXTERNAL') { const external_addrs = api_list.filter(addr => addr.kind === 'EXTERNAL' && diff --git a/src/util/barrier.js b/src/util/barrier.js index 0c2698d407..181a3eed88 100644 --- a/src/util/barrier.js +++ b/src/util/barrier.js @@ -1,8 +1,8 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var P = require('../util/promise'); +const _ = require('lodash'); +const P = require('../util/promise'); module.exports = Barrier; @@ -23,7 +23,7 @@ module.exports = Barrier; * */ function Barrier(options) { - var self = this; + const self = this; options = options || {}; self.max_length = options.max_length || 100; self.expiry_ms = options.expiry_ms || 1000; // default 1 second @@ -44,12 +44,12 @@ function Barrier(options) { * */ Barrier.prototype.call = function(item) { - var self = this; + const self = this; return P.fcall(function() { // add the item to the pending barrier and assign a defer // that will be resolved/rejected per this item. 
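// Editor's sketch, not a hunk of this patch: P.Defer() below is assumed to be
// NooBaa's promise helper exposing { promise, resolve, reject }. A minimal
// plain-Promise equivalent, to show what each queued barrier item carries:
function sketch_defer() {
    let resolve_fn;
    let reject_fn;
    const promise = new Promise((resolve, reject) => {
        resolve_fn = resolve;
        reject_fn = reject;
    });
    // release() can later settle every queued item in one batch
    return { promise, resolve: resolve_fn, reject: reject_fn };
}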
- var defer = new P.Defer(); + const defer = new P.Defer(); self.barrier.items.push(item); self.barrier.defers.push(defer); @@ -77,8 +77,8 @@ Barrier.prototype.call = function(item) { * */ Barrier.prototype.release = function() { - var self = this; - var barrier = self.barrier; + const self = this; + const barrier = self.barrier; clearTimeout(barrier.timeout); // reset a new pending barrier diff --git a/src/util/base_diagnostics.js b/src/util/base_diagnostics.js index 2f54c2c689..f951c810ec 100644 --- a/src/util/base_diagnostics.js +++ b/src/util/base_diagnostics.js @@ -98,7 +98,7 @@ function archive_diagnostics_pack(dst) { //Delete the oldest pack console.log('archive_diagnostics_pack4'); - var sorted_files = _.orderBy(files); + const sorted_files = _.orderBy(files); return fs.promises.unlink(config.central_stats.previous_diag_packs_dir + '/' + sorted_files[0]); } else { console.log('archive_diagnostics_pack5'); @@ -107,8 +107,8 @@ function archive_diagnostics_pack(dst) { .then(function() { console.log('archive_diagnostics_pack6'); //Archive the current pack - var now = new Date(); - var tail = now.getDate() + '-' + (now.getMonth() + 1) + '_' + now.getHours() + '-' + now.getMinutes(); + const now = new Date(); + const tail = now.getDate() + '-' + (now.getMonth() + 1) + '_' + now.getHours() + '-' + now.getMinutes(); return fs_utils.file_copy(dst, config.central_stats.previous_diag_packs_dir + '/DiagPack_' + tail + '.tgz'); }) .then(null, function(err) { @@ -117,7 +117,7 @@ function archive_diagnostics_pack(dst) { } function get_tmp_workdir() { - let is_windows = (process.platform === "win32"); + const is_windows = (process.platform === "win32"); return is_windows ? process.env.ProgramData + '/diag' : '/tmp/diag'; } diff --git a/src/util/buffer_utils.js b/src/util/buffer_utils.js index 84d201fac4..31ebf9a7ab 100644 --- a/src/util/buffer_utils.js +++ b/src/util/buffer_utils.js @@ -49,7 +49,7 @@ function join(buffers, total_length) { */ function extract(buffers, len) { const res = []; - var pos = 0; + let pos = 0; while (pos < len && buffers.length) { const b = buffers[0]; const n = Math.min(b.length, len - pos); @@ -155,8 +155,8 @@ function write_stream() { * @returns {number} */ function count_length(buffers) { - var l = 0; - for (var i = 0; i < buffers.length; ++i) { + let l = 0; + for (let i = 0; i < buffers.length; ++i) { l += buffers[i].length; } return l; diff --git a/src/util/chunk_splitter.js b/src/util/chunk_splitter.js index 89d93c07f8..fd1e7f6bf8 100644 --- a/src/util/chunk_splitter.js +++ b/src/util/chunk_splitter.js @@ -79,10 +79,10 @@ class ChunkSplitter extends stream.Transform { if (err) return callback(err); this.pending_split = input_buf ? [] : null; this.pending_split_len = 0; - var index = 0; + let index = 0; split_points.forEach(size => { const data = []; - var pos = 0; + let pos = 0; while (pos < size) { const needed = size - pos; const buf = this.pending_encode[index]; diff --git a/src/util/chunk_stream.js b/src/util/chunk_stream.js index 45acb1b337..4f6922d35a 100644 --- a/src/util/chunk_stream.js +++ b/src/util/chunk_stream.js @@ -1,7 +1,7 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var stream = require('stream'); +const stream = require('stream'); /** * @@ -26,8 +26,8 @@ class ChunkStream extends stream.Transform { _transform(data, encoding, callback) { // console.log('ChunkStream transform', data.length); while (data && data.length) { - let room = this.chunk_size - this.pending_bytes; - let buf = (room < data.length) ? 
data.slice(0, room) : data; + const room = this.chunk_size - this.pending_bytes; + const buf = (room < data.length) ? data.slice(0, room) : data; this.pending_buffers.push(buf); this.pending_bytes += buf.length; if (this.pending_bytes === this.chunk_size) { diff --git a/src/util/cloud_utils.js b/src/util/cloud_utils.js index 8d3e2401d3..adf4b6c04a 100644 --- a/src/util/cloud_utils.js +++ b/src/util/cloud_utils.js @@ -15,7 +15,7 @@ const defaultRoleSessionName = 'default_noobaa_s3_ops'; const defaultSTSCredsValidity = 3600; function find_cloud_connection(account, conn_name) { - let conn = (account.sync_credentials_cache || []) + const conn = (account.sync_credentials_cache || []) .filter(sync_conn => sync_conn.name === conn_name)[0]; if (!conn) { @@ -59,7 +59,7 @@ async function generate_aws_sts_creds(params, roleSessionName) { } function get_signed_url(params) { - let s3 = new AWS.S3({ + const s3 = new AWS.S3({ endpoint: params.endpoint, credentials: { accessKeyId: params.access_key.unwrap(), @@ -88,7 +88,7 @@ function get_signed_url(params) { // TODO: remove it after removed all old library code // and rename get_azure_new_connection_string to get_azure_connection_string function get_azure_connection_string(params) { - let endpoint_url = url.parse(params.endpoint); + const endpoint_url = url.parse(params.endpoint); let protocol = (endpoint_url.protocol ? endpoint_url.protocol : 'http:'); protocol = protocol.slice(0, protocol.length - 1); let connection_string = 'DefaultEndpointsProtocol=' + protocol + ';'; diff --git a/src/util/console_wrapper.js b/src/util/console_wrapper.js index b43e957a1a..9bf481e837 100644 --- a/src/util/console_wrapper.js +++ b/src/util/console_wrapper.js @@ -6,11 +6,11 @@ "use strict"; -var _ = require('lodash'); +const _ = require('lodash'); -var wrapperConsole = _.create(console); -var origConsole = console; -var dbg_logger; +const wrapperConsole = _.create(console); +const origConsole = console; +let dbg_logger; /* * @@ -44,7 +44,7 @@ wrapperConsole.warn = function() { * Switching between original console and wrapped one * */ -var syslog_levels = ["trace", "log", "info", "error", "warn"]; +const syslog_levels = ["trace", "log", "info", "error", "warn"]; exports.syslog_levels = syslog_levels; // var log_once_exception = false; diff --git a/src/util/dotenv.js b/src/util/dotenv.js index ebc8f17944..e58a0a4127 100644 --- a/src/util/dotenv.js +++ b/src/util/dotenv.js @@ -31,7 +31,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. const fs = require('fs'); const _ = require('lodash'); -var DROPPED_LINES = { +const DROPPED_LINES = { LINES: [], INDICES: [], }; @@ -45,11 +45,11 @@ module.exports = { */ config: function(options) { const paths = ['.env', '/data/.env']; - let encoding = 'utf8'; + const encoding = 'utf8'; paths.forEach(env_file => { try { - let parsedObj = this.parse(fs.readFileSync(env_file, { + const parsedObj = this.parse(fs.readFileSync(env_file, { encoding: encoding })); @@ -83,27 +83,27 @@ module.exports = { }); } } - var obj = {}; - var idx = 0; + const obj = {}; + let idx = 0; // convert Buffers before splitting into lines and processing src.toString().split('\n') .forEach(function(line) { // matching "KEY' and 'VAL' in 'KEY=VAL' - var keyValueArr = line.match(/^\s*([\w\.\-]+)\s*=\s*(.*)?\s*$/); + const keyValueArr = line.match(/^\s*([\w\.\-]+)\s*=\s*(.*)?\s*$/); // matched? 
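// Editor's sketch, illustration only: what the KEY=VAL regex above accepts.
// Group 1 is the key (word chars, dots, dashes), group 2 the raw value;
// non-matching lines fall through to DROPPED_LINES below.
const LINE_RE = /^\s*([\w\.\-]+)\s*=\s*(.*)?\s*$/;
console.log('FOO=bar'.match(LINE_RE));   // [ 'FOO=bar', 'FOO', 'bar', ... ]
console.log('# comment'.match(LINE_RE)); // null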
if (keyValueArr === null) { DROPPED_LINES.INDICES.push(idx); DROPPED_LINES.LINES.push(line); // console.warn('line', line); } else { - var key = keyValueArr[1]; + const key = keyValueArr[1]; // default undefined or missing values to empty string - var value = keyValueArr[2] ? keyValueArr[2] : ''; + let value = keyValueArr[2] ? keyValueArr[2] : ''; // expand newlines in quoted values - var len = value ? value.length : 0; + const len = value ? value.length : 0; if (len > 0 && value.charAt(0) === '"' && value.charAt(len - 1) === '"') { value = value.replace(/\\n/gm, '\n'); } @@ -128,13 +128,13 @@ module.exports = { * @param {Object} newVal - param name and new value of param */ set: function(newVal) { - var path = '/data/.env'; - var encoding = 'utf8'; - var silent = false; + const path = '/data/.env'; + const encoding = 'utf8'; + const silent = false; try { // specifying an encoding returns a string instead of a buffer - var newObj = this.replace(fs.readFileSync(path, { + const newObj = this.replace(fs.readFileSync(path, { encoding: encoding }), newVal); @@ -161,18 +161,18 @@ module.exports = { * @returns {Object} */ replace: function(src, newVal) { - var obj = {}; - var found = false; + const obj = {}; + let found = false; // convert Buffers before splitting into lines and processing src.toString().split('\n') .forEach(function(line) { // matching "KEY' and 'VAL' in 'KEY=VAL' - var keyValueArr = line.match(/^\s*([\w\.\-]+)\s*=\s*(.*)?\s*$/); + const keyValueArr = line.match(/^\s*([\w\.\-]+)\s*=\s*(.*)?\s*$/); // matched? if (keyValueArr !== null) { - var key = keyValueArr[1]; - var value; + const key = keyValueArr[1]; + let value; if (key === newVal.key) { value = newVal.value; found = true; diff --git a/src/util/frame_stream.js b/src/util/frame_stream.js index ff83d5266f..016b1ec3e6 100644 --- a/src/util/frame_stream.js +++ b/src/util/frame_stream.js @@ -52,11 +52,11 @@ class FrameStream { * optimized version of send_message - without try-catch that forces v8 deoptimization */ _send_message(buffers, message_type_code_16bit) { - var msg_len = _.sumBy(buffers, 'length'); + const msg_len = _.sumBy(buffers, 'length'); if (msg_len > this._max_len) { throw new Error('message too big' + msg_len); } - var msg_header = Buffer.allocUnsafe(this._header_len); + const msg_header = Buffer.allocUnsafe(this._header_len); msg_header.write(this._magic, 0, this._magic_len, 'ascii'); msg_header.writeUInt16BE(this._send_seq, this._magic_len); msg_header.writeUInt16BE(message_type_code_16bit || 0, this._magic_len + 2); @@ -77,7 +77,7 @@ class FrameStream { // we tried to use stream.cork()/uncork() surrounding the writes, // but there was no noticeable effect. 
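// Editor's sketch, layout inferred from the writes above and the reads in
// _on_data below: each frame is [ magic | seq:u16BE | type:u16BE | len:u32BE ]
// followed by the payload. The magic value here is hypothetical:
function sketch_encode_header(seq, type, len) {
    const magic = 'NBMS'; // hypothetical; the real this._magic is configured
    const header = Buffer.allocUnsafe(magic.length + 8);
    header.write(magic, 0, magic.length, 'ascii');
    header.writeUInt16BE(seq, magic.length);       // wraps at MAX_SEQ
    header.writeUInt16BE(type, magic.length + 2);  // message_type_code_16bit
    header.writeUInt32BE(len, magic.length + 4);   // payload byte length
    return header;
}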
this.stream.write(msg_header); - for (var i = 0; i < buffers.length; ++i) { + for (let i = 0; i < buffers.length; ++i) { this.stream.write(buffers[i]); } } @@ -85,7 +85,7 @@ class FrameStream { _on_data(data) { this._buffers.push(data); this._buffers_length += data.length; - var run = true; + let run = true; while (run) { // read the message header if not already read if (!this._msg_header) { @@ -98,8 +98,8 @@ class FrameStream { this._buffers_length -= this._header_len; this._msg_header = buffer_utils.extract_join(this._buffers, this._header_len); - var magic = this._msg_header.slice(0, this._magic_len).toString(); - var seq = this._msg_header.readUInt16BE(this._magic_len); + const magic = this._msg_header.slice(0, this._magic_len).toString(); + const seq = this._msg_header.readUInt16BE(this._magic_len); // verify the magic if (magic !== this._magic) { @@ -113,7 +113,7 @@ class FrameStream { if (isNaN(this._recv_seq)) { this._recv_seq = seq; } else { - var recv_seq = this._recv_seq + 1; + let recv_seq = this._recv_seq + 1; if (recv_seq >= MAX_SEQ) { recv_seq = 0; } @@ -129,7 +129,7 @@ class FrameStream { } // get the expected message length from the header - var msg_len = this._msg_header.readUInt32BE(this._magic_len + 4); + const msg_len = this._msg_header.readUInt32BE(this._magic_len + 4); // verify it doesn't exceed the maximum to avoid errors // that will cost in lots of memory @@ -148,8 +148,8 @@ class FrameStream { // got a complete message, remove the previous header and emit this._buffers_length -= msg_len; - var msg = buffer_utils.extract(this._buffers, msg_len); - var msg_type = this._msg_header.readUInt16BE(this._magic_len + 2); + const msg = buffer_utils.extract(this._buffers, msg_len); + const msg_type = this._msg_header.readUInt16BE(this._magic_len + 2); this._msg_header = null; this.msg_handler(msg, msg_type); } diff --git a/src/util/fs_utils.js b/src/util/fs_utils.js index 4a5911f31a..5cbfd10422 100644 --- a/src/util/fs_utils.js +++ b/src/util/fs_utils.js @@ -146,7 +146,7 @@ function find_all_lines_in_file(file_name, line_sub_string) { function get_last_line_in_file(file_name) { return fs.promises.readFile(file_name, 'utf8') .then(data => { - let lines = data.split('\n'); + const lines = data.split('\n'); let idx = lines.length - 1; while (!lines[idx] && idx > 0) { idx -= 1; @@ -193,11 +193,11 @@ async function file_delete(file_name) { function full_dir_copy(src, dst, filter_regex) { return P.fromCallback(callback => { ncp.limit = 10; - let ncp_options = {}; + const ncp_options = {}; if (filter_regex) { //this regexp will filter out files that matches, except path. 
- var ncp_filter_regex = new RegExp(filter_regex); - var ncp_filter_function = input => { + const ncp_filter_regex = new RegExp(filter_regex); + const ncp_filter_function = input => { if (input.indexOf('/') > 0) { return false; } else if (ncp_filter_regex.test(input)) { diff --git a/src/util/histogram.js b/src/util/histogram.js index 32f2438609..32245d4569 100644 --- a/src/util/histogram.js +++ b/src/util/histogram.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); +const _ = require('lodash'); module.exports = Histogram; -var SINGLE_BIN_DEFAULTS = { +const SINGLE_BIN_DEFAULTS = { start_val: 0, count: 0, aggregated_sum: 0, @@ -30,7 +30,7 @@ function Histogram(master_label, structure) { } this._bins = []; - for (var i = 0; i < structure.length; ++i) { + for (let i = 0; i < structure.length; ++i) { this._bins.push(_.cloneDeep(SINGLE_BIN_DEFAULTS)); this._bins[i].label = structure[i].label; this._bins[i].count = 0; @@ -40,7 +40,7 @@ function Histogram(master_label, structure) { } Histogram.prototype.add_value = function(value) { - for (var i = this._bins.length - 1; i >= 0; --i) { + for (let i = this._bins.length - 1; i >= 0; --i) { if (value >= this._bins[i].start_val) { this._bins[i].count += 1; this._bins[i].aggregated_sum += value; @@ -51,7 +51,7 @@ Histogram.prototype.add_value = function(value) { Histogram.prototype.add_aggregated_values = function(values) { - for (var i = this._bins.length - 1; i >= 0; --i) { + for (let i = this._bins.length - 1; i >= 0; --i) { this._bins[i].count += values.count[i]; this._bins[i].aggregated_sum += values.aggregated_sum[i]; } @@ -59,11 +59,11 @@ Histogram.prototype.add_aggregated_values = function(values) { Histogram.prototype.get_object_data = function(skip_master_label) { - var ret = { + const ret = { master_label: skip_master_label ? this._master_label : '', bins: [], }; - for (var i = 0; i < this._bins.length; ++i) { + for (let i = 0; i < this._bins.length; ++i) { ret.bins.push({}); ret.bins[i].label = this._bins[i].label; ret.bins[i].range = this._bins[i].start_val + (i === this._bins.length - 1 ? '+' : '-' + this._bins[i + 1].start_val); @@ -77,8 +77,8 @@ Histogram.prototype.get_object_data = function(skip_master_label) { }; Histogram.prototype.get_string_data = function() { - var str = (typeof(this._master_label) === 'undefined' ? '' : this._master_label + ' '); - for (var i = 0; i < this._bins.length; ++i) { + let str = (typeof(this._master_label) === 'undefined' ? '' : this._master_label + ' '); + for (let i = 0; i < this._bins.length; ++i) { str += this._bins[i].label + ' (' + this._bins[i].start_val + (i === this._bins.length - 1 ? '+' : '-' + this._bins[i + 1].start_val) + diff --git a/src/util/http_recorder.js b/src/util/http_recorder.js index 5fd82ff6be..f5b439a89c 100644 --- a/src/util/http_recorder.js +++ b/src/util/http_recorder.js @@ -25,7 +25,7 @@ class HTTPRecorder extends stream.Writable { this._parser.reinitialize(HTTPParser.REQUEST, true); let slow_url = ''; - let slow_headers = []; + const slow_headers = []; // `headers` and `url` are set only if .onHeaders() has not been called for // this request. @@ -92,7 +92,7 @@ class HTTPRecorder extends stream.Writable { } _write(data, encoding, next) { - let buf = encoding ? Buffer.from(data, encoding) : data; + const buf = encoding ? 
Buffer.from(data, encoding) : data; if (this._out_file) { this._out_file.write(buf); } else { diff --git a/src/util/ifconfig.js b/src/util/ifconfig.js index b775df5b65..6320511919 100644 --- a/src/util/ifconfig.js +++ b/src/util/ifconfig.js @@ -1,8 +1,8 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var os = require('os'); +const _ = require('lodash'); +const os = require('os'); module.exports = { get_main_external_ipv4: get_main_external_ipv4, @@ -20,7 +20,7 @@ module.exports = { * */ function get_main_external_ipv4() { - var ips = get_external_ipv4(); + const ips = get_external_ipv4(); if (!_.isArray(ips)) { return ips; } @@ -41,7 +41,7 @@ function get_main_external_ipv4() { * */ function get_external_ipv4() { - var ips; + let ips; _.each(os.networkInterfaces(), function(ifcs, name) { _.each(ifcs, function(ifc) { if (ifc.internal || !ifc.address || ifc.family !== 'IPv4') { diff --git a/src/util/js_utils.js b/src/util/js_utils.js index 32badcb782..8d34210c4b 100644 --- a/src/util/js_utils.js +++ b/src/util/js_utils.js @@ -29,7 +29,7 @@ function self_bind(object, method_desc) { return; } - var func = object[method_desc]; + const func = object[method_desc]; // create a closure function that applies the original function on object function closure_func() { @@ -43,7 +43,7 @@ function self_bind(object, method_desc) { // see http://jsperf.com/concat-vs-push-apply/39 -var _cached_array_push = Array.prototype.push; +const _cached_array_push = Array.prototype.push; /** @@ -67,7 +67,7 @@ function array_push_keep_latest(array, items, limit) { * add to array, create it in the object if doesnt exist */ function named_array_push(obj, arr_name, item) { - var arr = obj[arr_name]; + let arr = obj[arr_name]; if (arr) { _cached_array_push.call(arr, item); } else { @@ -96,7 +96,7 @@ function deep_freeze(obj) { // Freeze all properties const keys = Object.keys(obj); - for (var i = 0; i < keys.length; ++i) { + for (let i = 0; i < keys.length; ++i) { const k = keys[i]; const v = obj[k]; deep_freeze(v); @@ -146,7 +146,7 @@ function sort_compare_by(key_getter, order) { class PackedObject { constructor(obj) { const keys = Object.keys(obj); - for (var i = 0; i < keys.length; ++i) { + for (let i = 0; i < keys.length; ++i) { this[keys[i]] = obj[keys[i]]; } } diff --git a/src/util/keys_lock.js b/src/util/keys_lock.js index 7a5f5a07b5..4dc0c09cf5 100644 --- a/src/util/keys_lock.js +++ b/src/util/keys_lock.js @@ -24,7 +24,7 @@ class KeysLock { throw new TypeError('Keys should be an array'); } - let lock_item = { + const lock_item = { keys: keys, locked: false, }; diff --git a/src/util/linked_list.js b/src/util/linked_list.js index aeae1a049c..aae7210d98 100644 --- a/src/util/linked_list.js +++ b/src/util/linked_list.js @@ -17,7 +17,7 @@ class LinkedList { insert_after(item, new_item) { this.check_item(item); this.check_new_item(new_item); - var next = item[this.next]; + const next = item[this.next]; new_item[this.next] = next; new_item[this.prev] = item; new_item[this.head] = this; @@ -30,7 +30,7 @@ class LinkedList { insert_before(item, new_item) { this.check_item(item); this.check_new_item(new_item); - var prev = item[this.prev]; + const prev = item[this.prev]; new_item[this.next] = item; new_item[this.prev] = prev; new_item[this.head] = this; @@ -41,8 +41,8 @@ class LinkedList { } remove(item) { - var next = item[this.next]; - var prev = item[this.prev]; + const next = item[this.next]; + const prev = item[this.prev]; if (!next || !prev) { return false; // already removed } @@ -57,12 
+57,12 @@ class LinkedList { } get_next(item) { - var next = item[this.next]; + const next = item[this.next]; return next === this ? null : next; } get_prev(item) { - var prev = item[this.prev]; + const prev = item[this.prev]; return prev === this ? null : prev; } @@ -87,7 +87,7 @@ class LinkedList { } pop_front() { - var item = this.get_front(); + const item = this.get_front(); if (item) { this.remove(item); return item; @@ -95,7 +95,7 @@ class LinkedList { } pop_back() { - var item = this.get_back(); + const item = this.get_back(); if (item) { this.remove(item); return item; @@ -123,8 +123,8 @@ class LinkedList { return ''; } - var cur = this.get_front(); - var str = String(cur); + let cur = this.get_front(); + let str = String(cur); while (cur) { cur = this.get_next(cur); str += ', ' + cur; diff --git a/src/util/lru.js b/src/util/lru.js index 5b4cc9d7eb..8da792c5a3 100644 --- a/src/util/lru.js +++ b/src/util/lru.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -let _ = require('lodash'); -let assert = require('assert'); -let LinkedList = require('./linked_list'); +const _ = require('lodash'); +const assert = require('assert'); +const LinkedList = require('./linked_list'); -let DEFAULT_PARAMS = { +const DEFAULT_PARAMS = { max_usage: 32, expiry_ms: 0, name: '', @@ -43,7 +43,7 @@ class LRU { let item = this.find_item(id); if (!item) { // miss - insert new item on front - let now = this.params.expiry_ms ? Date.now() : 0; + const now = this.params.expiry_ms ? Date.now() : 0; item = new LRUItem(this, id, now); this._add_item(item); } @@ -52,8 +52,8 @@ class LRU { // return the item from the LRU cache, create it if missing. find_item(id) { - let item = this.map.get(id); - let now = this.params.expiry_ms ? Date.now() : 0; + const item = this.map.get(id); + const now = this.params.expiry_ms ? Date.now() : 0; if (item) { // check if not expired if (!this.params.expiry_ms || (now < item.time + this.params.expiry_ms)) { @@ -81,7 +81,7 @@ class LRU { // setting the item usage before updating // so that if the update will decides to discard this // current item it will be able to account it. 
- let diff = usage - item.usage; + const diff = usage - item.usage; item.usage = usage; if (this.list.is_linked(item)) { this._update_usage(diff); @@ -95,7 +95,7 @@ class LRU { _update_usage(diff) { this.usage += diff; while (this.usage > this.params.max_usage && this.list.length) { - let item = this.list.get_back(); + const item = this.list.get_back(); this._remove_item(item); } } diff --git a/src/util/lru_cache.js b/src/util/lru_cache.js index ea7f329454..6e582045b8 100644 --- a/src/util/lru_cache.js +++ b/src/util/lru_cache.js @@ -1,8 +1,8 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var LRU = require('./lru'); +const _ = require('lodash'); +const LRU = require('./lru'); /** * @template T params @@ -101,8 +101,8 @@ class LRUCache { * @returns {V | undefined} */ peek_cache(params) { - let key = this.make_key(params); - let item = this.lru.find_item(key); + const key = this.make_key(params); + const item = this.lru.find_item(key); if (item && item.d) { return this.make_val(item.d, params); } @@ -113,11 +113,11 @@ class LRUCache { * @param {V} data */ put_in_cache(params, data) { - var key = this.make_key(params); - var item = this.lru.find_or_add_item(key); + const key = this.make_key(params); + const item = this.lru.find_or_add_item(key); item.d = data; if (this.item_usage) { - let usage = this.item_usage(data, params); + const usage = this.item_usage(data, params); this.lru.set_usage(item, usage); } } @@ -143,7 +143,7 @@ class LRUCache { * @param {T} params */ invalidate(params) { - var key = this.make_key(params); + const key = this.make_key(params); return this.invalidate_key(key); } @@ -152,7 +152,7 @@ class LRUCache { * @param {K} key */ invalidate_key(key) { - var item = this.lru.remove_item(key); + const item = this.lru.remove_item(key); if (item && item.val) { return item.val; } diff --git a/src/util/mongo_client.js b/src/util/mongo_client.js index 84b9bf5528..ff66fcfe61 100644 --- a/src/util/mongo_client.js +++ b/src/util/mongo_client.js @@ -87,7 +87,7 @@ class MongoSequence { const query = {}; const update = { $inc: { object_version_seq: 1 } }; const options = { upsert: true, returnOriginal: false }; - let res = await this._collection.findOneAndUpdate(query, update, options); + const res = await this._collection.findOneAndUpdate(query, update, options); return res.value.object_version_seq; } } @@ -631,9 +631,9 @@ class MongoClient extends EventEmitter { } initiate_replica_set(set, members, is_config_set) { - var port = is_config_set ? config.MONGO_DEFAULTS.CFG_PORT : config.MONGO_DEFAULTS.SHARD_SRV_PORT; - var rep_config = this._build_replica_config(set, members, port, is_config_set); - var command = { + const port = is_config_set ? config.MONGO_DEFAULTS.CFG_PORT : config.MONGO_DEFAULTS.SHARD_SRV_PORT; + const rep_config = this._build_replica_config(set, members, port, is_config_set); + const command = { replSetInitiate: rep_config }; dbg.log0('Calling initiate_replica_set', util.inspect(command, false, null)); @@ -647,10 +647,10 @@ class MongoClient extends EventEmitter { } replica_update_members(set, members, is_config_set) { - var port = is_config_set ? config.MONGO_DEFAULTS.CFG_PORT : config.MONGO_DEFAULTS.SHARD_SRV_PORT; - var rep_config = this._build_replica_config(set, members, port, is_config_set); + const port = is_config_set ? 
config.MONGO_DEFAULTS.CFG_PORT : config.MONGO_DEFAULTS.SHARD_SRV_PORT; + const rep_config = this._build_replica_config(set, members, port, is_config_set); - var command = { + const command = { replSetReconfig: rep_config }; return P.resolve(this.get_rs_version(is_config_set)) @@ -705,12 +705,12 @@ class MongoClient extends EventEmitter { if (!this.mongo_client) { throw new Error('db is not initialized'); } - let options = params || {}; + const options = params || {}; const is_config_set = options.is_config_set; const COMMAND_TIMEOUT = options.timeout || 5000; - var command = { + const command = { replSetGetStatus: 1 }; @@ -734,7 +734,7 @@ class MongoClient extends EventEmitter { } async get_rs_version(is_config_set) { - var command = { + const command = { replSetGetConfig: 1 }; let res; @@ -758,7 +758,7 @@ class MongoClient extends EventEmitter { } async set_debug_level(level) { - var command = { + const command = { setParameter: 1, logLevel: level }; @@ -795,7 +795,7 @@ class MongoClient extends EventEmitter { // eslint-disable-next-line no-unmodified-loop-condition while (db_is_down && !waiting_exhausted) { try { - let stats = this.mongo_client && await this.mongo_client.db().stats(); + const stats = this.mongo_client && await this.mongo_client.db().stats(); db_is_down = _.get(stats, 'ok') !== 1; } catch (err) { dbg.error('db is still down. got error on db.stats():', err.message); @@ -845,7 +845,7 @@ class MongoClient extends EventEmitter { } async force_mongo_sync_journal() { - var command = { + const command = { fsync: 1, async: false, @@ -879,12 +879,12 @@ class MongoClient extends EventEmitter { } _build_replica_config(set, members, port, is_config_set) { - var rep_config = { + const rep_config = { _id: set, configsvr: (_.isUndefined(is_config_set)) ? false : is_config_set, members: [] }; - var id = 0; + let id = 0; _.each(members, function(m) { rep_config.members.push({ _id: id, diff --git a/src/util/mongo_functions.js b/src/util/mongo_functions.js index 4bf151ede2..3e91ef04a9 100644 --- a/src/util/mongo_functions.js +++ b/src/util/mongo_functions.js @@ -18,9 +18,9 @@ // declare names that these functions expect to have in scope // so that lint tools will not give warnings. -let emit = (key, value) => value; -let prefix = ''; -let delimiter = ''; +const emit = (key, value) => value; +const prefix = ''; +const delimiter = ''; /** * @this mongodb doc being mapped @@ -29,8 +29,8 @@ let delimiter = ''; * In case of an object it will emit the object key with the object itself. */ function map_common_prefixes() { - var suffix = this.key.slice(prefix.length); - var pos = suffix.indexOf(delimiter); + const suffix = this.key.slice(prefix.length); + const pos = suffix.indexOf(delimiter); if (pos >= 0) { emit([suffix.slice(0, pos + 1), 'common_prefix'], 1); } else { @@ -47,8 +47,8 @@ function reduce_common_prefixes(key, values) { if (key[1] === 'common_prefix') { // For common prefixes we count the number of objects that were emitted on that prefix // This count is not really used, so we could also just return 1, but we count it anyway. - var count = 0; - for (var i = 0; i < values.length; ++i) count += values[i]; + let count = 0; + for (let i = 0; i < values.length; ++i) count += values[i]; return count; } else { // Objects are uniquely emitted with their _id, so we do not expect multiple values. 
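[Reviewer note, not part of the diff: the prefer-const conversions in map_common_prefixes/reduce_common_prefixes above are easy to sanity-check outside mongo, since the functions only depend on the emit/prefix/delimiter names declared in scope. A minimal sketch under stated assumptions -- the sample keys and the local emit collector below are hypothetical stand-ins for mongo's mapReduce scope:

    'use strict';
    // stand-in mapReduce scope: prefix/delimiter as consts, emit collects
    // [key, value] pairs into a Map keyed by the JSON form of the key
    const prefix = 'photos/';
    const delimiter = '/';
    const emitted = new Map();
    const emit = (key, value) => {
        const k = JSON.stringify(key);
        if (!emitted.has(k)) emitted.set(k, []);
        emitted.get(k).push(value);
    };
    // same logic as the patched map_common_prefixes, bound to a doc argument
    function map_common_prefixes(doc) {
        const suffix = doc.key.slice(prefix.length);
        const pos = suffix.indexOf(delimiter);
        if (pos >= 0) {
            emit([suffix.slice(0, pos + 1), 'common_prefix'], 1);
        } else {
            emit([suffix, doc._id], doc);
        }
    }
    ['photos/2023/a.jpg', 'photos/2023/b.jpg', 'photos/c.jpg']
        .forEach((key, i) => map_common_prefixes({ _id: i, key }));
    // reduce step: common prefixes sum their emitted 1s, objects pass through
    for (const [k, values] of emitted) {
        const key = JSON.parse(k);
        if (key[1] === 'common_prefix') {
            let count = 0;
            for (let i = 0; i < values.length; ++i) count += values[i];
            console.log('common prefix', key[0], 'count', count);
        }
    }

Running this prints "common prefix 2023/ count 2": the two photos/2023/... keys fold into one delimiter-style common prefix while photos/c.jpg is emitted as a plain object -- the same behavior before and after the var-to-const change, which is the point of the rule.]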
@@ -68,7 +68,7 @@ function map_aggregate_objects() { emit([this.bucket, 'count'], 1); // map for histogram calcs - emit the size and count with a key that is the log2 of the size - var pow = 0; + let pow = 0; if (this.size > 1) { pow = Math.ceil(Math.log2(this.size)); } @@ -101,8 +101,8 @@ function map_aggregate_blocks() { * @this mongodb doc being mapped */ function map_key_with_prefix_delimiter() { - var suffix = this.key.slice(prefix.length); - var pos = suffix.indexOf(delimiter); + const suffix = this.key.slice(prefix.length); + const pos = suffix.indexOf(delimiter); if (pos >= 0) { emit(suffix.slice(0, pos), undefined); } @@ -114,9 +114,9 @@ function map_key_with_prefix_delimiter() { // this function must be self contained to be able to send to mongo mapReduce() // so not using any functions or constants from above. function reduce_sum(key, values) { - var PETABYTE = 1024 * 1024 * 1024 * 1024 * 1024; - var n = 0; - var peta = 0; + const PETABYTE = 1024 * 1024 * 1024 * 1024 * 1024; + let n = 0; + let peta = 0; values.forEach(function(v) { if (typeof(v) === 'number') { n += v; @@ -142,7 +142,7 @@ const func_stats_exports = (function() { // the map/reduce/finalize function below and prevent lint errors. let step; let max_samples; - let percentiles = []; + const percentiles = []; /** * @this mongodb doc being mapped diff --git a/src/util/nb_native.js b/src/util/nb_native.js index bff7926a2f..16ad487571 100644 --- a/src/util/nb_native.js +++ b/src/util/nb_native.js @@ -12,7 +12,7 @@ const child_process = require('child_process'); const async_exec = util.promisify(child_process.exec); const async_delay = util.promisify(setTimeout); -var nb_native_napi; +let nb_native_napi; function nb_native() { diff --git a/src/util/net_utils.js b/src/util/net_utils.js index 0d7a07bea9..d22e332d9f 100644 --- a/src/util/net_utils.js +++ b/src/util/net_utils.js @@ -32,8 +32,8 @@ async function ping(target, options) { options = options || DEFAULT_PING_OPTIONS; _.defaults(options, DEFAULT_PING_OPTIONS); - let session = net_ping.createSession(options); - let candidate_ip = url.parse(target).hostname || target; + const session = net_ping.createSession(options); + const candidate_ip = url.parse(target).hostname || target; if (net.isIP(candidate_ip)) { await _ping_ip(session, candidate_ip); diff --git a/src/util/os_utils.js b/src/util/os_utils.js index f410d143bb..c33ed61e85 100644 --- a/src/util/os_utils.js +++ b/src/util/os_utils.js @@ -83,11 +83,11 @@ async function exec(command, options) { */ function fork(command, input_args, opts, ignore_rc) { return new Promise((resolve, reject) => { - let options = opts || {}; - let args = input_args || []; + const options = opts || {}; + const args = input_args || []; dbg.log0('fork:', command, args.join(' '), options, ignore_rc); options.stdio = options.stdio || 'inherit'; - var proc = child_process.fork(command, args, options); + const proc = child_process.fork(command, args, options); proc.on('exit', code => { if (code === 0 || ignore_rc) { resolve(); @@ -124,7 +124,7 @@ function spawn(command, args, options, ignore_rc, unref, timeout_ms) { args = args || []; dbg.log0('spawn:', command, args.join(' '), options, ignore_rc); options.stdio = options.stdio || 'inherit'; - var proc = child_process.spawn(command, args, options); + const proc = child_process.spawn(command, args, options); proc.on('exit', code => { if (code === 0 || ignore_rc) { resolve(); @@ -162,12 +162,12 @@ function spawn(command, args, options, ignore_rc, unref, timeout_ms) { function os_info() { 
//Convert X.Y eth name style to X-Y as mongo doesn't accept . in it's keys - var orig_ifaces = os.networkInterfaces(); - var interfaces = _.clone(orig_ifaces); + const orig_ifaces = os.networkInterfaces(); + const interfaces = _.clone(orig_ifaces); _.each(orig_ifaces, function(iface, name) { if (name.indexOf('.') !== -1) { - var new_name = name.replace(/\./g, '-'); + const new_name = name.replace(/\./g, '-'); interfaces[new_name] = iface; delete interfaces[name]; } @@ -253,7 +253,7 @@ function get_raw_storage() { } else { return read_drives() .then(drives => { - let root = drives.find(drive => drive.mount === '/'); + const root = drives.find(drive => drive.mount === '/'); if (root) { return root.storage.total; } else { @@ -287,7 +287,7 @@ function get_distro() { // calculate cpu) function calc_cpu_usage(current_cpus, previous_cpus) { previous_cpus = previous_cpus || [{ times: { user: 0, nice: 0, sys: 0, idle: 0, irq: 0, } }]; - let previous_cpus_reduced = previous_cpus.map(cpu => cpu.times).reduce((prev, curr) => ({ + const previous_cpus_reduced = previous_cpus.map(cpu => cpu.times).reduce((prev, curr) => ({ user: prev.user + curr.user, nice: prev.nice + curr.nice, sys: prev.sys + curr.sys, @@ -295,7 +295,7 @@ function calc_cpu_usage(current_cpus, previous_cpus) { irq: prev.irq + curr.irq })); // sum current cpus, and subtract the sum of previous cpus (take negative of prev_sum as initial val) - let current_cpus_reduced = current_cpus.map(cpu => cpu.times).reduce((prev, curr) => ({ + const current_cpus_reduced = current_cpus.map(cpu => cpu.times).reduce((prev, curr) => ({ user: prev.user + curr.user, nice: prev.nice + curr.nice, sys: prev.sys + curr.sys, @@ -303,8 +303,8 @@ function calc_cpu_usage(current_cpus, previous_cpus) { irq: prev.irq + curr.irq }), _.mapValues(previous_cpus_reduced, val => (-1) * val)); - let total = _.reduce(current_cpus_reduced, (a, b) => a + b); - let usage = 1 - (current_cpus_reduced.idle / total); // return the time not in idle + const total = _.reduce(current_cpus_reduced, (a, b) => a + b); + const usage = 1 - (current_cpus_reduced.idle / total); // return the time not in idle return usage; } @@ -377,15 +377,15 @@ async function get_drive_of_path(file_path) { function remove_linux_readonly_drives(volumes) { if (IS_MAC) return volumes; // grep command to get read only filesystems from /proc/mount - let grep_command = 'grep "\\sro[\\s,]" /proc/mounts'; + const grep_command = 'grep "\\sro[\\s,]" /proc/mounts'; return exec(grep_command, { ignore_rc: true, return_stdout: true, }) .then(grep_res => { - let ro_drives = grep_res.split('\n').map(drive => drive.split(' ')[0]); + const ro_drives = grep_res.split('\n').map(drive => drive.split(' ')[0]); // only use volumes that are not one of the ro_drives. - let ret_vols = volumes.filter(vol => ro_drives.indexOf(vol.drive_id) === -1); + const ret_vols = volumes.filter(vol => ro_drives.indexOf(vol.drive_id) === -1); return ret_vols; }); } @@ -443,7 +443,7 @@ function linux_volume_to_drive(vol, size_by_bd, skip) { } function top_single(dst) { - var file_redirect = dst ? ' &> ' + dst : ''; + const file_redirect = dst ? 
' &> ' + dst : ''; if (IS_MAC) { return exec('top -c -l 1' + file_redirect); } else if (IS_LINUX) { @@ -505,7 +505,7 @@ function _set_dns_server(servers) { } function get_time_config() { - var reply = { + const reply = { srv_time: 0, timezone: '', status: false @@ -516,7 +516,7 @@ function get_time_config() { return_stdout: true, }) .then(tzone => { - var symlink = tzone.split('>')[1].split('/zoneinfo/')[1].trim(); + const symlink = tzone.split('>')[1].split('/zoneinfo/')[1].trim(); reply.srv_time = moment().tz(symlink) .format(); reply.timezone = symlink; @@ -525,8 +525,8 @@ function get_time_config() { } function get_local_ipv4_ips() { - var ifaces = os.networkInterfaces(); - var ips = []; + const ifaces = os.networkInterfaces(); + const ips = []; _.each(ifaces, function(iface) { _.each(iface, function(ifname) { //Don't count non IPv4 or Internals @@ -541,7 +541,7 @@ function get_local_ipv4_ips() { } function get_networking_info() { - var ifaces = os.networkInterfaces(); + const ifaces = os.networkInterfaces(); return ifaces; } @@ -642,9 +642,9 @@ function restart_noobaa_services() { dbg.warn('RESTARTING SERVICES!!!', (new Error()).stack); - var fname = '/tmp/spawn.log'; - var stdout = fs.openSync(fname, 'a'); - var stderr = fs.openSync(fname, 'a'); + const fname = '/tmp/spawn.log'; + const stdout = fs.openSync(fname, 'a'); + const stderr = fs.openSync(fname, 'a'); spawn('nohup', [ '/usr/bin/supervisorctl', 'restart', @@ -726,7 +726,7 @@ function _check_ports_on_linux(dest_ips, start_port, end_port) { return true; } - let ports_groups = _.groupBy(_.range(start_port, end_port + 1), port => { + const ports_groups = _.groupBy(_.range(start_port, end_port + 1), port => { // go over all relevant rules, and look for the first matching rule (maybe partial match) for (const rule of filtered_rules) { if (port >= rule.start_port && port <= rule.end_port) { @@ -754,14 +754,14 @@ function get_iptables_rules() { }) .then(output => { // split output to lines, and remove first two lines (title lines) and empty lines - let raw_rules = output.split('\n') + const raw_rules = output.split('\n') .slice(2) .filter(line => Boolean(line.length)); return raw_rules.map(line => { line = line.trim(); // split by spaces to different attributes, but limit to 9. the last attribute // can contain spaces, so we will extract it separately - let attributes = line.split(/\s+/, 9); + const attributes = line.split(/\s+/, 9); if (attributes.length !== 9) { throw new Error('Failed parsing iptables output. 
expected split to return 9 fields'); } diff --git a/src/util/postgres_client.js b/src/util/postgres_client.js index bf9c82a460..1adbbf5a92 100644 --- a/src/util/postgres_client.js +++ b/src/util/postgres_client.js @@ -134,7 +134,7 @@ function handle_ops_encoding(schema, val) { if (!val) return; - let obj = {}; + const obj = {}; // handle $all if (val.$all) { @@ -283,7 +283,7 @@ function buildPostgresArrayQuery(table_name, update, find) { let latest_set; _.map(Object.keys(update), to_set => { - let arr_and_property = to_set.split('.$.'); + const arr_and_property = to_set.split('.$.'); if (arr_and_property.length > 1) { arr_to_update = arr_and_property[0]; latest_set = `jsonb_set(${latest_set || 'data'}, array['${arr_to_update}', elem_index::text,` + @@ -464,13 +464,13 @@ class BulkOp { } findAndUpdateOne(find, update) { - let encoded_update = encode_json(this.schema, update); + const encoded_update = encode_json(this.schema, update); const pg_update = mongo_to_pg.convertUpdate('data', encoded_update); - let encoded_find = encode_json(this.schema, find); + const encoded_find = encode_json(this.schema, find); const pg_selector = mongo_to_pg('data', encoded_find, { disableContainmentQuery: true }); - let dollar_array_query = convert_array_query(this.name, encoded_update, encoded_find); + const dollar_array_query = convert_array_query(this.name, encoded_update, encoded_find); const query = dollar_array_query || `UPDATE ${this.name} SET data = ${pg_update} WHERE ${pg_selector}`; this.add_query(query); @@ -755,7 +755,7 @@ class PostgresTable { const encoded_find = encode_json(this.schema, selector); const pg_selector = mongo_to_pg('data', encoded_find, { disableContainmentQuery: true }); - let dollar_array_query = convert_array_query(this.name, encoded_update, encoded_find); + const dollar_array_query = convert_array_query(this.name, encoded_update, encoded_find); const query = (dollar_array_query || `UPDATE ${this.name} SET data = ${pg_update} WHERE ${pg_selector}`) + ' RETURNING _id, data'; try { @@ -773,7 +773,7 @@ class PostgresTable { // console.warn('JENIA updateOne', selector, update, options); const pg_update = mongo_to_pg.convertUpdate('data', encode_json(this.schema, update)); const pg_selector = mongo_to_pg('data', encode_json(this.schema, selector), { disableContainmentQuery: true }); - let query = `UPDATE ${this.name} SET data = ${pg_update} WHERE ${pg_selector} RETURNING _id, data`; + const query = `UPDATE ${this.name} SET data = ${pg_update} WHERE ${pg_selector} RETURNING _id, data`; try { const res = await this.single_query(query, null, client); assert(res.rowCount <= 1, `_id must be unique. found ${res.rowCount} rows with _id=${selector._id} in table ${this.name}`); @@ -1236,7 +1236,7 @@ class PostgresTable { // lock was already taken which means that another call to findOneAndUpdate should have inserted. // throw and retry dbg.log0(`advisory lock is taken. 
throwing and retrying`); - let err = new Error(`retry update after advisory lock release ${this.name}`); + const err = new Error(`retry update after advisory lock release ${this.name}`); err.retry = true; throw err; } diff --git a/src/util/rand_stream.js b/src/util/rand_stream.js index daa8ff49e4..7895e8df30 100644 --- a/src/util/rand_stream.js +++ b/src/util/rand_stream.js @@ -1,8 +1,8 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -let stream = require('stream'); -let crypto = require('crypto'); +const stream = require('stream'); +const crypto = require('crypto'); /** * diff --git a/src/util/range_utils.js b/src/util/range_utils.js index 618b41b7ce..797da3c2c9 100644 --- a/src/util/range_utils.js +++ b/src/util/range_utils.js @@ -16,8 +16,8 @@ module.exports = { * find the intersection between two ranges */ function intersection(start1, end1, start2, end2) { - var start = start1 > start2 ? start1 : start2; - var end = end1 < end2 ? end1 : end2; + const start = start1 > start2 ? start1 : start2; + const end = end1 < end2 ? end1 : end2; return (end <= start) ? null : { start: start, end: end, @@ -88,7 +88,7 @@ function align_up(offset, boundary) { * but only if such boundary exists between start and end. */ function truncate_range_end_to_boundary(start, end, boundary) { - var new_end = align_down(end, boundary); + const new_end = align_down(end, boundary); return (new_end > start) ? new_end : end; } diff --git a/src/util/schema_utils.js b/src/util/schema_utils.js index fbe91473f6..42bce4a146 100644 --- a/src/util/schema_utils.js +++ b/src/util/schema_utils.js @@ -81,7 +81,7 @@ function strictify(schema, options, base) { } function check_schema_extra_keywords(schema, base, keywords) { - let remain = _.omit(schema, COMMON_SCHEMA_KEYWORDS, keywords); + const remain = _.omit(schema, COMMON_SCHEMA_KEYWORDS, keywords); if (!_.isEmpty(remain)) { illegal_json_schema(schema, base, 'extra keywords in schema - ' + _.keys(remain)); } diff --git a/src/util/size_utils.js b/src/util/size_utils.js index ea2a318bdf..e1b05b67f3 100644 --- a/src/util/size_utils.js +++ b/src/util/size_utils.js @@ -75,8 +75,8 @@ function bigint_to_json(bi) { * take a json format {peta:.., n: ..} and convert to BigInteger */ function json_to_bigint(x) { - var n = 0; - var peta = 0; + let n = 0; + let peta = 0; if (x && typeof(x) === 'object' && 'n' in x && 'peta' in x) { n = Math.floor(x.n) || 0; peta = Math.floor(x.peta) || 0; @@ -155,7 +155,7 @@ function size_unit_to_bigint(size, unit) { * mult_factor & div_factor must be positive integers. 
*/ function reduce_storage(reducer, storage_items, mult_factor, div_factor) { - let accumulator = _.reduce(storage_items, + const accumulator = _.reduce(storage_items, (acc, item) => { _.each(SOTRAGE_OBJ_KEYS, key => { if (item && !_.isUndefined(item[key])) { @@ -191,11 +191,11 @@ function sum_bigint_json(a, b) { function size_min(values) { - var n_min = PETABYTE; - var peta_min = 100000; + let n_min = PETABYTE; + let peta_min = 100000; values.forEach(function(v) { - var n = 0; - var peta = 0; + let n = 0; + let peta = 0; if (typeof(v) === 'number') { n = v; } else if (v) { @@ -219,11 +219,11 @@ function size_min(values) { } function size_max(values) { - var n_max = 0; - var peta_max = 0; + let n_max = 0; + let peta_max = 0; values.forEach(function(v) { - var n = 0; - var peta = 0; + let n = 0; + let peta = 0; if (typeof(v) === 'number') { n = v; } else if (v) { @@ -263,8 +263,8 @@ function reduce_maximum(key, values) { * { n: bytes, peta: petabytes } */ function human_size(bytes) { - var x; - var i = 0; + let x; + let i = 0; if (typeof(bytes) === 'object') { if (bytes.peta) { x = bytes.peta + (bytes.n / PETABYTE); @@ -304,12 +304,12 @@ function human_size(bytes) { * */ function human_offset(offset) { - var res = ''; - var sign = ''; - var i; - var n; - var peta; - var mod; + let res = ''; + let sign = ''; + let i; + let n; + let peta; + let mod; if (typeof(offset) === 'object') { peta = offset.peta; diff --git a/src/util/slice_reader.js b/src/util/slice_reader.js index d000038db4..8d0093a0ba 100644 --- a/src/util/slice_reader.js +++ b/src/util/slice_reader.js @@ -1,7 +1,7 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var stream = require('stream'); +const stream = require('stream'); /** * SliceReader is a Readable stream that uses slice on a source object. diff --git a/src/util/speedometer.js b/src/util/speedometer.js index 1167939ad2..10cb232b13 100644 --- a/src/util/speedometer.js +++ b/src/util/speedometer.js @@ -36,7 +36,7 @@ class Speedometer { } }); } - for (var i = 0; i < count; ++i) { + for (let i = 0; i < count; ++i) { const worker = cluster.fork(); console.warn('Worker start', worker.process.pid); } diff --git a/src/util/string_utils.js b/src/util/string_utils.js index 12f238fefd..ca16982c7c 100644 --- a/src/util/string_utils.js +++ b/src/util/string_utils.js @@ -32,7 +32,7 @@ function crypto_random_string(len, charset = ALPHA_NUMERIC_CHARSET) { } function left_pad_zeros(str, to_length) { - let num_zeros = to_length - str.length; + const num_zeros = to_length - str.length; let zeros = ''; if (num_zeros > 0) { zeros = '0'.repeat(num_zeros); @@ -79,13 +79,13 @@ function levenshtein_distance(s, t, fuzzy, stop_marker) { // use formula to fill in the rest of the row for (let j = 0; j < t.length; ++j) { - var cost = (s[i] === t[j]) ? 0 : 1; + const cost = (s[i] === t[j]) ? 
0 : 1; v1[j + 1] = Math.min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost); } // copy v1 (current row) to v0 (previous row) for next iteration // bail if we already passed the stop marker, to allow to drop matches faster - var min = Infinity; + let min = Infinity; for (let j = 0; j < v0.length; ++j) { v0[j] = v1[j]; if (min > v1[j]) { diff --git a/src/util/url_utils.js b/src/util/url_utils.js index e04fc5743d..27af6654a0 100644 --- a/src/util/url_utils.js +++ b/src/util/url_utils.js @@ -1,11 +1,11 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -var _ = require('lodash'); -var url = require('url'); -var querystring = require('querystring'); +const _ = require('lodash'); +const url = require('url'); +const querystring = require('querystring'); -var QUICK_PARSE_REGEXP = /^\s*(\w+:)?(\/\/)?(([^:/[\]]*)|\[([a-fA-F0-9:.]*)\])?(:\d*)?(\/[^?#]*)?(\?[^#]*)?(#.*)?\s*$/; +const QUICK_PARSE_REGEXP = /^\s*(\w+:)?(\/\/)?(([^:/[\]]*)|\[([a-fA-F0-9:.]*)\])?(:\d*)?(\/[^?#]*)?(\?[^#]*)?(#.*)?\s*$/; /** * parse url string much faster than url.parse() - reduce the time to 1/10. @@ -23,8 +23,8 @@ function quick_parse(url_string, parse_query_string) { // we do it to avoid complexity, and since we use quick_parse on very specific places it doesn't matter for now. // we need to review it again if neccessary. url_string = url_string.toLowerCase(); - var match = url_string.match(QUICK_PARSE_REGEXP); - var u = new url.Url(); + const match = url_string.match(QUICK_PARSE_REGEXP); + const u = new url.Url(); if (!match) return u; u.href = url_string; u.protocol = match[1] || null; @@ -59,29 +59,29 @@ function construct_url(def) { } function benchmark() { - var testing_url = process.argv[2] || "http://localhost:4545/"; - var url_parse_res = url.parse(testing_url, true); - var quick_parse_res = quick_parse(testing_url, true); + const testing_url = process.argv[2] || "http://localhost:4545/"; + const url_parse_res = url.parse(testing_url, true); + const quick_parse_res = quick_parse(testing_url, true); console.log('\nurl.parse("' + testing_url + '") = ', url_parse_res); console.log('\nquick_parse("' + testing_url + '") = ', quick_parse_res); console.log(' '); _.forIn(url_parse_res, function(v1, k) { - var v2 = quick_parse_res[k]; + const v2 = quick_parse_res[k]; if (!_.isEqual(v1, v2)) { console.log('!!! Bad value quick_parse()', k + ': ' + JSON.stringify(v2), 'expected', JSON.stringify(v1)); } }); - var url_parse_fmt = url.format(url_parse_res); - var quick_parse_fmt = url.format(quick_parse_res); + const url_parse_fmt = url.format(url_parse_res); + const quick_parse_fmt = url.format(quick_parse_res); if (url_parse_fmt !== testing_url) { console.log('!!! Bad format(url.parse) =', url_parse_fmt, 'expected', testing_url); } if (quick_parse_fmt !== testing_url) { console.log('!!! 
Bad format(quick_parse) =', quick_parse_fmt, 'expected', testing_url); } - var tests = [ + const tests = [ function test_url_parse() { return url.parse(testing_url, true); }, @@ -89,17 +89,17 @@ function benchmark() { return quick_parse(testing_url, true); } ]; - for (var t = 0; t < tests.length; ++t) { - var test = tests[t]; + for (let t = 0; t < tests.length; ++t) { + const test = tests[t]; console.log('\nbenchmarking', test.name, '...'); - var count = 0; - var start = Date.now(); - var now = start; - var last_print = start; - var last_count = 0; - var speed; + let count = 0; + const start = Date.now(); + let now = start; + let last_print = start; + let last_count = 0; + let speed; do { - for (var i = 0; i < 5000; ++i) test(); + for (let i = 0; i < 5000; ++i) test(); count += 5000; now = Date.now(); if (now - last_print > 1000) { diff --git a/src/util/xml_utils.js b/src/util/xml_utils.js index 25347529f3..fd13313f80 100644 --- a/src/util/xml_utils.js +++ b/src/util/xml_utils.js @@ -54,8 +54,8 @@ function append_object(append, object) { for (const key of object_own_keys) { // undefined values skip encoding the key tag altogether - let val = object[key]; - let val_type = typeof(val); + const val = object[key]; + const val_type = typeof(val); if (val_type === 'undefined') continue; // keys starting with _ are not considered tag names From 7cfcad103806c2ff27955ca0408e112a7bf943dd Mon Sep 17 00:00:00 2001 From: shirady <57721533+shirady@users.noreply.github.com> Date: Sun, 16 Apr 2023 13:02:29 +0300 Subject: [PATCH 2/2] bump eslint version Signed-off-by: shirady <57721533+shirady@users.noreply.github.com> --- package-lock.json | 294 +++++++++++++++++++++++----------------------- package.json | 2 +- 2 files changed, 146 insertions(+), 150 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2827661d6c..5bff5cc906 100644 --- a/package-lock.json +++ b/package-lock.json @@ -74,7 +74,7 @@ "@types/node": "18.15.11", "@types/pg": "8.6.6", "@types/request": "2.48.8", - "eslint": "8.23.1", + "eslint": "8.38.0", "eslint-plugin-header": "3.1.1", "istanbul-lib-coverage": "3.2.0", "istanbul-lib-hook": "3.0.0", @@ -735,15 +735,39 @@ "node": ">=12" } }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.0.tgz", + "integrity": "sha512-vITaYzIcNmjn5tF5uxcZ/ft7/RXGrMUIS9HalWckEOF6ESiwXKoMzAQf2UW0aVd6rnOeExTJVd5hmWXucBKGXQ==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, "node_modules/@eslint/eslintrc": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.4.1.tgz", - "integrity": "sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.2.tgz", + "integrity": "sha512-3W4f5tDUra+pA+FzgugqL2pRimUTDJWKr7BINqOpkZrC0uYI0NIc0/JFgBROCU07HR6GieA5m3/rsPIhDmCXTQ==", "dev": true, "dependencies": { "ajv": 
"^6.12.4", "debug": "^4.3.2", - "espree": "^9.4.0", + "espree": "^9.5.1", "globals": "^13.19.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", @@ -798,9 +822,9 @@ } }, "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "13.19.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.19.0.tgz", - "integrity": "sha512-dkQ957uSRWHw7CFXLUtUHQI3g3aWApYhfNR2O6jn/907riyTYKVBmxYVROkBcY614FSSeSJh7Xm7SrUWCxvJMQ==", + "version": "13.20.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", + "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -842,6 +866,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@eslint/js": { + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.38.0.tgz", + "integrity": "sha512-IoD2MfUnOV58ghIHCiil01PcohxjbYR/qCxsoC+xNgUwh1EY8jOOrYmu3d3a71+tJJ23uscEV4X2HJWMsPJu4g==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, "node_modules/@gar/promisify": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", @@ -911,14 +944,14 @@ } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.10.7", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.10.7.tgz", - "integrity": "sha512-MDl6D6sBsaV452/QSdX+4CXIjZhIcI0PELsxUjk4U828yd58vk3bTIvk/6w5FY+4hIy9sLW0sfrV7K7Kc++j/w==", + "version": "0.11.8", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", + "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", "dev": true, "dependencies": { "@humanwhocodes/object-schema": "^1.2.1", "debug": "^4.1.1", - "minimatch": "^3.0.4" + "minimatch": "^3.0.5" }, "engines": { "node": ">=10.10.0" @@ -941,16 +974,6 @@ } } }, - "node_modules/@humanwhocodes/gitignore-to-minimatch": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/gitignore-to-minimatch/-/gitignore-to-minimatch-1.0.2.tgz", - "integrity": "sha512-rSqmMJDdLFUsyxR6FMtD00nfQKKLFb1kv+qBbOVKqErvloEIJLo5bDTJTQNTYgeyp78JsA7u/NPi5jT1GR/MuA==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", @@ -3167,15 +3190,18 @@ } }, "node_modules/eslint": { - "version": "8.23.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.23.1.tgz", - "integrity": "sha512-w7C1IXCc6fNqjpuYd0yPlcTKKmHlHHktRkzmBPZ+7cvNBQuiNjx0xaMTjAJGCafJhQkrFJooREv0CtrVzmHwqg==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.38.0.tgz", + "integrity": "sha512-pIdsD2jwlUGf/U38Jv97t8lq6HpaU/G9NKbYmpWpZGw3LdTNhZLbJePqxOXGB5+JEKfOPU/XLxYxFh03nr1KTg==", "dev": true, "dependencies": { - "@eslint/eslintrc": "^1.3.2", - "@humanwhocodes/config-array": "^0.10.4", - "@humanwhocodes/gitignore-to-minimatch": "^1.0.2", + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.4.0", + "@eslint/eslintrc": "^2.0.2", + "@eslint/js": "8.38.0", + "@humanwhocodes/config-array": "^0.11.8", "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -3183,22 +3209,21 @@ 
"doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", "eslint-scope": "^7.1.1", - "eslint-utils": "^3.0.0", - "eslint-visitor-keys": "^3.3.0", - "espree": "^9.4.0", - "esquery": "^1.4.0", + "eslint-visitor-keys": "^3.4.0", + "espree": "^9.5.1", + "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^6.0.1", "find-up": "^5.0.0", - "glob-parent": "^6.0.1", - "globals": "^13.15.0", - "globby": "^11.1.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", "grapheme-splitter": "^1.0.4", "ignore": "^5.2.0", "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", "js-sdsl": "^4.1.4", "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", @@ -3207,7 +3232,6 @@ "minimatch": "^3.1.2", "natural-compare": "^1.4.0", "optionator": "^0.9.1", - "regexpp": "^3.2.0", "strip-ansi": "^6.0.1", "strip-json-comments": "^3.1.0", "text-table": "^0.2.0" @@ -3244,40 +3268,16 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, - "node_modules/eslint-utils": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-3.0.0.tgz", - "integrity": "sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==", - "dev": true, - "dependencies": { - "eslint-visitor-keys": "^2.0.0" - }, - "engines": { - "node": "^10.0.0 || ^12.0.0 || >= 14.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/mysticatea" - }, - "peerDependencies": { - "eslint": ">=5" - } - }, - "node_modules/eslint-utils/node_modules/eslint-visitor-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", - "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", - "dev": true, - "engines": { - "node": ">=10" - } - }, "node_modules/eslint-visitor-keys": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz", - "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.0.tgz", + "integrity": "sha512-HPpKPUBQcAsZOsHAFwTtIKcYlCje62XB7SEAcxjtmW6TD1WVpkS6i6/hOVtTZIl4zGj/mBqpFVGvaDneik+VoQ==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint/node_modules/ajv": { @@ -3468,14 +3468,14 @@ } }, "node_modules/espree": { - "version": "9.4.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.4.1.tgz", - "integrity": "sha512-XwctdmTO6SIvCzd9810yyNzIrOrqNYV9Koizx4C/mRhf9uq0o4yHoCEU/670pOxOL/MSraektvSAji79kX90Vg==", + "version": "9.5.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.1.tgz", + "integrity": "sha512-5yxtHSZXRSW5pvv3hAlXM5+/Oswi1AUFqBmbibKb5s6bp3rGIDkyXU6xCoyuuLhijr4SFwPrXRoZjz0AZDN9tg==", "dev": true, "dependencies": { "acorn": "^8.8.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.3.0" + "eslint-visitor-keys": "^3.4.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -3497,9 +3497,9 @@ } }, "node_modules/esquery": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz", - "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==", + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", "dev": true, "dependencies": { "estraverse": "^5.1.0" @@ -4989,6 +4989,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/is-plain-obj": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", @@ -8074,18 +8083,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/regexpp": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", - "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/mysticatea" - } - }, "node_modules/release-zalgo": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", @@ -10398,15 +10395,30 @@ "@jridgewell/trace-mapping": "0.3.9" } }, + "@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^3.3.0" + } + }, + "@eslint-community/regexpp": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.0.tgz", + "integrity": "sha512-vITaYzIcNmjn5tF5uxcZ/ft7/RXGrMUIS9HalWckEOF6ESiwXKoMzAQf2UW0aVd6rnOeExTJVd5hmWXucBKGXQ==", + "dev": true + }, "@eslint/eslintrc": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.4.1.tgz", - "integrity": "sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.2.tgz", + "integrity": "sha512-3W4f5tDUra+pA+FzgugqL2pRimUTDJWKr7BINqOpkZrC0uYI0NIc0/JFgBROCU07HR6GieA5m3/rsPIhDmCXTQ==", "dev": true, "requires": { "ajv": "^6.12.4", "debug": "^4.3.2", - "espree": "^9.4.0", + "espree": "^9.5.1", "globals": "^13.19.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", @@ -10443,9 +10455,9 @@ } }, "globals": { - "version": "13.19.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.19.0.tgz", - "integrity": "sha512-dkQ957uSRWHw7CFXLUtUHQI3g3aWApYhfNR2O6jn/907riyTYKVBmxYVROkBcY614FSSeSJh7Xm7SrUWCxvJMQ==", + "version": "13.20.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", + "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", "dev": true, "requires": { "type-fest": "^0.20.2" @@ -10474,6 +10486,12 @@ } } }, + "@eslint/js": { + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.38.0.tgz", + "integrity": "sha512-IoD2MfUnOV58ghIHCiil01PcohxjbYR/qCxsoC+xNgUwh1EY8jOOrYmu3d3a71+tJJ23uscEV4X2HJWMsPJu4g==", + "dev": true + }, "@gar/promisify": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", @@ 
-10530,14 +10548,14 @@ } }, "@humanwhocodes/config-array": { - "version": "0.10.7", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.10.7.tgz", - "integrity": "sha512-MDl6D6sBsaV452/QSdX+4CXIjZhIcI0PELsxUjk4U828yd58vk3bTIvk/6w5FY+4hIy9sLW0sfrV7K7Kc++j/w==", + "version": "0.11.8", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", + "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", "dev": true, "requires": { "@humanwhocodes/object-schema": "^1.2.1", "debug": "^4.1.1", - "minimatch": "^3.0.4" + "minimatch": "^3.0.5" }, "dependencies": { "debug": { @@ -10551,12 +10569,6 @@ } } }, - "@humanwhocodes/gitignore-to-minimatch": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/gitignore-to-minimatch/-/gitignore-to-minimatch-1.0.2.tgz", - "integrity": "sha512-rSqmMJDdLFUsyxR6FMtD00nfQKKLFb1kv+qBbOVKqErvloEIJLo5bDTJTQNTYgeyp78JsA7u/NPi5jT1GR/MuA==", - "dev": true - }, "@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", @@ -12276,15 +12288,18 @@ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" }, "eslint": { - "version": "8.23.1", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.23.1.tgz", - "integrity": "sha512-w7C1IXCc6fNqjpuYd0yPlcTKKmHlHHktRkzmBPZ+7cvNBQuiNjx0xaMTjAJGCafJhQkrFJooREv0CtrVzmHwqg==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.38.0.tgz", + "integrity": "sha512-pIdsD2jwlUGf/U38Jv97t8lq6HpaU/G9NKbYmpWpZGw3LdTNhZLbJePqxOXGB5+JEKfOPU/XLxYxFh03nr1KTg==", "dev": true, "requires": { - "@eslint/eslintrc": "^1.3.2", - "@humanwhocodes/config-array": "^0.10.4", - "@humanwhocodes/gitignore-to-minimatch": "^1.0.2", + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.4.0", + "@eslint/eslintrc": "^2.0.2", + "@eslint/js": "8.38.0", + "@humanwhocodes/config-array": "^0.11.8", "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", "ajv": "^6.10.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -12292,22 +12307,21 @@ "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", "eslint-scope": "^7.1.1", - "eslint-utils": "^3.0.0", - "eslint-visitor-keys": "^3.3.0", - "espree": "^9.4.0", - "esquery": "^1.4.0", + "eslint-visitor-keys": "^3.4.0", + "espree": "^9.5.1", + "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^6.0.1", "find-up": "^5.0.0", - "glob-parent": "^6.0.1", - "globals": "^13.15.0", - "globby": "^11.1.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", "grapheme-splitter": "^1.0.4", "ignore": "^5.2.0", "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", "js-sdsl": "^4.1.4", "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", @@ -12316,7 +12330,6 @@ "minimatch": "^3.1.2", "natural-compare": "^1.4.0", "optionator": "^0.9.1", - "regexpp": "^3.2.0", "strip-ansi": "^6.0.1", "strip-json-comments": "^3.1.0", "text-table": "^0.2.0" @@ -12468,38 +12481,21 @@ "estraverse": "^5.2.0" } }, - "eslint-utils": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-3.0.0.tgz", - "integrity": "sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^2.0.0" - }, - "dependencies": { - 
"eslint-visitor-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", - "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", - "dev": true - } - } - }, "eslint-visitor-keys": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz", - "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.0.tgz", + "integrity": "sha512-HPpKPUBQcAsZOsHAFwTtIKcYlCje62XB7SEAcxjtmW6TD1WVpkS6i6/hOVtTZIl4zGj/mBqpFVGvaDneik+VoQ==", "dev": true }, "espree": { - "version": "9.4.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.4.1.tgz", - "integrity": "sha512-XwctdmTO6SIvCzd9810yyNzIrOrqNYV9Koizx4C/mRhf9uq0o4yHoCEU/670pOxOL/MSraektvSAji79kX90Vg==", + "version": "9.5.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.1.tgz", + "integrity": "sha512-5yxtHSZXRSW5pvv3hAlXM5+/Oswi1AUFqBmbibKb5s6bp3rGIDkyXU6xCoyuuLhijr4SFwPrXRoZjz0AZDN9tg==", "dev": true, "requires": { "acorn": "^8.8.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.3.0" + "eslint-visitor-keys": "^3.4.0" } }, "esprima": { @@ -12508,9 +12504,9 @@ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" }, "esquery": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz", - "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", "dev": true, "requires": { "estraverse": "^5.1.0" @@ -13619,6 +13615,12 @@ "has-tostringtag": "^1.0.0" } }, + "is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true + }, "is-plain-obj": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", @@ -15886,12 +15888,6 @@ "functions-have-names": "^1.2.2" } }, - "regexpp": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz", - "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==", - "dev": true - }, "release-zalgo": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", diff --git a/package.json b/package.json index 31199cf826..392d0f146c 100644 --- a/package.json +++ b/package.json @@ -130,7 +130,7 @@ "@types/node": "18.15.11", "@types/pg": "8.6.6", "@types/request": "2.48.8", - "eslint": "8.23.1", + "eslint": "8.38.0", "eslint-plugin-header": "3.1.1", "istanbul-lib-coverage": "3.2.0", "istanbul-lib-hook": "3.0.0",