diff --git a/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_bucket_mirror_sync_policy.yaml b/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_bucket_mirror_sync_policy.yaml
index e86c3ff7c..2ed2a39ac 100644
--- a/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_bucket_mirror_sync_policy.yaml
+++ b/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_bucket_mirror_sync_policy.yaml
@@ -2,16 +2,21 @@
 config:
   user_count: 1
   bucket_count: 2
+  objects_count: 25
+  objects_size_range:
+    min: 5K
+    max: 2M
   multisite_global_sync_policy: true
   multisite_sync_policy: true
   test_ops:
     create_bucket: true
+    create_object: true
     group_create: true
-    group_remove: false
+    group_remove: true
     flow_create: true
-    flow_remove: false
+    flow_remove: true
     pipe_create: true
-    pipe_remove: false
-    group_status: allowed # Enable , Allowed, Forbidden
+    pipe_remove: true
+    group_status: enabled # Enable , Allowed, Forbidden
     bucket_group_status: enabled
     flow_type: symmetrical # symmetrical , directional
diff --git a/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_mirror_sync_policy.yaml b/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_mirror_sync_policy.yaml
index 2e032b218..1726b2bea 100644
--- a/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_mirror_sync_policy.yaml
+++ b/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_mirror_sync_policy.yaml
@@ -2,12 +2,17 @@
 config:
   user_count: 1
   bucket_count: 2
+  objects_count: 25
+  objects_size_range:
+    min: 5K
+    max: 2M
   multisite_global_sync_policy: true
   multisite_sync_policy: false
   test_ops:
     create_bucket: true
+    create_object: true
     group_create: true
-    group_remove: false
+    group_remove: true
     flow_create: true
     flow_remove: false
     pipe_create: true
diff --git a/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_sync_policy.yaml b/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_sync_policy.yaml
index 3c47479a9..49b699569 100644
--- a/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_sync_policy.yaml
+++ b/rgw/v2/tests/s3_swift/multisite_configs/test_multisite_sync_policy.yaml
@@ -3,6 +3,10 @@
 config:
   user_count: 1
   bucket_count: 2
+  objects_count: 0
+  objects_size_range:
+    min: 5K
+    max: 2M
   multisite_global_sync_policy: true
   multisite_sync_policy: true
   test_ops:
@@ -14,5 +18,5 @@ config:
     pipe_create: true
     pipe_remove: true
     group_status: enabled # Enable , Allowed, Forbidden
-    bucket_group_status: enabled
+    bucket_group_status: allowed
     flow_type: symmetrical # symmetrical , directional
diff --git a/rgw/v2/tests/s3_swift/multisite_configs/test_sync_policy_state_change.yaml b/rgw/v2/tests/s3_swift/multisite_configs/test_sync_policy_state_change.yaml
index 4f3f87bb3..d7725b093 100644
--- a/rgw/v2/tests/s3_swift/multisite_configs/test_sync_policy_state_change.yaml
+++ b/rgw/v2/tests/s3_swift/multisite_configs/test_sync_policy_state_change.yaml
@@ -2,6 +2,10 @@
 config:
   user_count: 1
   bucket_count: 2
+  objects_count: 0
+  objects_size_range:
+    min: 5K
+    max: 2M
   multisite_global_sync_policy: true
   multisite_sync_policy: false
   test_ops:
diff --git a/rgw/v2/tests/s3_swift/reusable.py b/rgw/v2/tests/s3_swift/reusable.py
index ff7b8325e..a0dbe48b2 100644
--- a/rgw/v2/tests/s3_swift/reusable.py
+++ b/rgw/v2/tests/s3_swift/reusable.py
@@ -1700,23 +1700,147 @@ def group_operation(group_id, group_op, group_status="enabled", bucket_name=None
     utils.exec_shell_cmd(cmd)
 
 
-def flow_operation(group_id, flow_op, flow_type="symmetrical"):
+def get_sync_policy(bucket_name=None):
+    if bucket_name is not None:
+        bkt = f" --bucket={bucket_name}"
+    else:
+        bkt = ""
+    sync_policy_resp = json.loads(
+        utils.exec_shell_cmd(f"radosgw-admin sync policy get{bkt}")
+    )
+    return sync_policy_resp
+
+
+def verify_bucket_sync_policy_on_other_site(rgw_ssh_con, bucket):
+    log.info(f"Verify bucket sync policy exists on other site for bucket {bucket.name}")
+    _, stdout, stderr = rgw_ssh_con.exec_command(
+        f"radosgw-admin sync policy get --bucket {bucket.name}"
+    )
+    sync_policy_error = stderr.read().decode()
+    sync_policy_error_list = sync_policy_error.split("\n")
+    if sync_policy_error_list[0] != "":
+        raise TestExecError(
+            f"Get sync policy for bucket {bucket.name} on another site failed: {sync_policy_error_list}"
+        )
+    cmd_output = json.loads(stdout.read().decode())
+    log.info(f"sync policy get from other site: {cmd_output} for bucket {bucket.name}")
+    if len(cmd_output["groups"]) == 0:
+        log.info(
+            f"bucket sync policy for {bucket.name} not synced to another site, sleep 60s and retry"
+        )
+        for retry_count in range(20):
+            time.sleep(60)
+            _, re_stdout, _ = rgw_ssh_con.exec_command(
+                f"radosgw-admin sync policy get --bucket {bucket.name}"
+            )
+            re_cmd_output = json.loads(re_stdout.read().decode())
+            log.info(
+                f"sync policy get from other site after 60s: {re_cmd_output} for bucket {bucket.name}"
+            )
+            if len(re_cmd_output["groups"]) == 0:
+                log.info(
+                    f"bucket sync policy for {bucket.name} not synced to another site, so retry"
+                )
+            else:
+                log.info(f"bucket sync policy synced to another site for {bucket.name}")
+                break
+
+        if len(re_cmd_output["groups"]) == 0:
+            raise TestExecError(
+                f"bucket sync policy for {bucket.name} not synced to another site even after 20m"
+            )
+
+
+def verify_object_sync_on_other_site(rgw_ssh_con, bucket, config):
+    log.info(f"Verify object sync on other site for bucket {bucket.name}")
+    bucket_stats = json.loads(
+        utils.exec_shell_cmd(f"radosgw-admin bucket stats --bucket {bucket.name}")
+    )
+    bkt_objects = bucket_stats["usage"]["rgw.main"]["num_objects"]
+    if bkt_objects != config.objects_count:
+        raise TestExecError(
+            f"Did not find {config.objects_count} objects in bucket {bucket.name}, but found {bkt_objects}"
+        )
+    _, stdout, _ = rgw_ssh_con.exec_command(
+        f"radosgw-admin bucket stats --bucket {bucket.name}"
+    )
+    cmd_output = json.loads(stdout.read().decode())
+    if "rgw.main" not in cmd_output["usage"].keys():
+        for retry_count in range(20):
+            time.sleep(60)
+            _, re_stdout, _ = rgw_ssh_con.exec_command(
+                f"radosgw-admin bucket stats --bucket {bucket.name}"
+            )
+            re_cmd_output = json.loads(re_stdout.read().decode())
+            log.info(
+                f"check bucket stats on other site after 60s: {re_cmd_output} for bucket {bucket.name}"
+            )
+            if "rgw.main" not in re_cmd_output["usage"].keys():
+                log.info(f"bucket stats not synced for bucket {bucket.name}, so retry")
+            else:
+                log.info(f"bucket stats synced for bucket {bucket.name}")
+                cmd_output = re_cmd_output
+                break
+
+        if "rgw.main" not in cmd_output["usage"].keys():
+            raise TestExecError(
+                f"object not synced on bucket {bucket.name} in another site even after 20m"
+            )
+
+    site_bkt_objects = cmd_output["usage"]["rgw.main"]["num_objects"]
+    if bkt_objects != site_bkt_objects:
+        raise TestExecError(
+            f"object count mismatch found in another site for bucket {bucket.name}: {site_bkt_objects}, expected {bkt_objects}"
+        )
+
+
+def flow_operation(
+    group_id,
+    flow_op,
+    flow_type="symmetrical",
+    bucket_name=None,
+    source_zone=None,
+    dest_zone=None,
+):
     flow_id = group_id + "flow"
+    bkt = ""
+    if bucket_name is not None:
+        bkt = f" --bucket={bucket_name}"
= f" --bucket={bucket_name}" zone_names, _ = get_multisite_info() - cmd = f"radosgw-admin sync group flow {flow_op} --group-id={group_id} --flow-id={flow_id} --flow-type={flow_type} --zones={zone_names}" + cmd = f"radosgw-admin sync group flow {flow_op} --group-id={group_id} --flow-id={flow_id} --flow-type={flow_type}" + if flow_type == "directional": + cmd += f" --source-zone={source_zone} --dest-zone={dest_zone}" + bkt + else: + cmd += f" --zones={zone_names}" + bkt utils.exec_shell_cmd(cmd) return zone_names def pipe_operation( - group_id, pipe_op, zone_names=None, bucket_name=None, policy_detail=None + group_id, + pipe_op, + zone_names=None, + bucket_name=None, + policy_detail=None, + source_zones=None, + dest_zones=None, ): pipe_id = group_id + "pipe" if zone_names is not None: zone_name = zone_names.split(",") zn = f" --source-zones='{zone_name[0]}','{zone_name[1]}' --dest-zones='{zone_name[0]}','{zone_name[1]}'" + if source_zones is not None: + zn = f" --source-zones={source_zones}" + if dest_zones is not None: + zn += f" --dest-zones={dest_zones}" + else: + zn += " --dest-zones='*'" else: - zn = " --source-zones='*' --dest-zones='*'" + zn = " --source-zones='*'" + if dest_zones is not None: + zn += f" --dest-zones={dest_zones}" + else: + zn += " --dest-zones='*'" if bucket_name is not None: bkt = f" --bucket={bucket_name}" else: diff --git a/rgw/v2/tests/s3_swift/test_multisite_sync_policy.py b/rgw/v2/tests/s3_swift/test_multisite_sync_policy.py index 84c97ec8c..9d49e1228 100644 --- a/rgw/v2/tests/s3_swift/test_multisite_sync_policy.py +++ b/rgw/v2/tests/s3_swift/test_multisite_sync_policy.py @@ -5,6 +5,9 @@ Note: Any one of these yamls can be used test_multisite_sync_policy.yaml + test_sync_policy_state_change.yaml + test_multisite_mirror_sync_policy.yaml + test_multisite_bucket_mirror_sync_policy.yaml Operation: Creates and delete sync policy group @@ -20,6 +23,7 @@ import argparse import json import logging +import time import traceback import v2.lib.resource_op as s3lib @@ -45,6 +49,27 @@ def test_exec(config, ssh_con): # create user all_users_info = s3lib.create_users(config.user_count) + for each_user in all_users_info: + # authenticate + auth = Auth(each_user, ssh_con, ssl=config.ssl) + if config.use_aws4 is True: + rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) + else: + rgw_conn = auth.do_auth() + # create buckets + if config.test_ops["create_bucket"] is True: + log.info(f"no of buckets to create: {config.bucket_count}") + buckets = [] + for bc in range(config.bucket_count): + bucket_name_to_create = utils.gen_bucket_name_from_userid( + each_user["user_id"], rand_no=bc + ) + log.info(f"creating bucket with name: {bucket_name_to_create}") + bucket = reusable.create_bucket( + bucket_name_to_create, rgw_conn, each_user + ) + buckets.append(bucket) + if config.multisite_global_sync_policy: ceph_version_id, _ = utils.get_ceph_version() ceph_version_id = ceph_version_id.split("-") @@ -71,39 +96,16 @@ def test_exec(config, ssh_con): group_id2 = "new_group" reusable.group_operation(group_id2, "create", group_status) pipe2 = reusable.pipe_operation(group_id2, "create", zone_names) - if config.test_ops["pipe_remove"]: - pipe_id = reusable.pipe_operation( - group_id, "remove", zone_names - ) - if config.test_ops["flow_remove"]: - flow_type = config.test_ops["flow_type"] - zone_names = reusable.flow_operation( - group_id, "remove", flow_type - ) - if config.test_ops["group_remove"]: - group_status = config.test_ops["group_status"] - reusable.group_operation(group_id, 
"remove", group_status) - if config.test_ops.get("group_transition", False): - reusable.group_operation(group_id2, "remove", group_status) - for each_user in all_users_info: - # authenticate - auth = Auth(each_user, ssh_con, ssl=config.ssl) - if config.use_aws4 is True: - rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) - else: - rgw_conn = auth.do_auth() - # create buckets - if config.test_ops["create_bucket"] is True: - log.info("no of buckets to create: %s" % config.bucket_count) - for bc in range(config.bucket_count): - bucket_name_to_create = utils.gen_bucket_name_from_userid( - each_user["user_id"], rand_no=bc - ) - log.info("creating bucket with name: %s" % bucket_name_to_create) - bucket = reusable.create_bucket( - bucket_name_to_create, rgw_conn, each_user - ) + if config.test_ops["create_bucket"] is True: + for each_user in all_users_info: + # authenticate + auth = Auth(each_user, ssh_con, ssl=config.ssl) + if config.use_aws4 is True: + rgw_conn = auth.do_auth(**{"signature_version": "s3v4"}) + else: + rgw_conn = auth.do_auth() + for bkt in buckets: if config.multisite_sync_policy: ceph_version_id, _ = utils.get_ceph_version() ceph_version_id = ceph_version_id.split("-") @@ -111,21 +113,23 @@ def test_exec(config, ssh_con): if float(ceph_version_id[0]) >= 16: if utils.is_cluster_multisite(): if config.test_ops["group_create"]: - # modifying global group status to allowed + # modifying global group status to allowed if its not allowed bucket_group_status = config.test_ops[ "bucket_group_status" ] - reusable.group_operation( - group_id, - "modify", - group_status, - ) - group_id1 = "group-" + bucket_name_to_create + group_info = reusable.get_sync_policy() + if group_info["groups"][0]["status"] != "allowed": + reusable.group_operation( + group_id, + "modify", + "allowed", + ) + group_id1 = "group-" + bkt.name reusable.group_operation( group_id1, "create", bucket_group_status, - bucket_name_to_create, + bkt.name, ) zone_names = None if config.test_ops["pipe_create"]: @@ -133,25 +137,142 @@ def test_exec(config, ssh_con): group_id1, "create", zone_names, - bucket_name=bucket_name_to_create, + bucket_name=bkt.name, + ) + + period_details = json.loads( + utils.exec_shell_cmd("radosgw-admin period get") + ) + zone_list = json.loads(utils.exec_shell_cmd("radosgw-admin zone list")) + for zone in period_details["period_map"]["zonegroups"][0]["zones"]: + if zone["name"] not in zone_list["zones"]: + rgw_nodes = zone["endpoints"][0].split(":") + node_rgw = rgw_nodes[1].split("//")[-1] + log.info(f"Another site is: {zone['name']} and ip {node_rgw}") + break + rgw_ssh_con = utils.connect_remote(node_rgw) + + for bkt in buckets: + ceph_version_id, _ = utils.get_ceph_version() + ceph_version_id = ceph_version_id.split("-") + ceph_version_id = ceph_version_id[0].split(".") + if float(ceph_version_id[0]) >= 16: + if utils.is_cluster_multisite(): + if config.multisite_sync_policy: + if config.test_ops["group_create"]: + if config.test_ops["pipe_create"]: + reusable.verify_bucket_sync_policy_on_other_site( + rgw_ssh_con, bkt + ) + + if config.test_ops.get("create_object", False): + # uploading data + log.info( + f"s3 objects to create: {config.objects_count}" ) + for oc, size in list(config.mapped_sizes.items()): + config.obj_size = size + s3_object_name = utils.gen_s3_object_name( + bkt.name, oc + ) + log.info(f"s3 object name: {s3_object_name}") + s3_object_path = os.path.join( + TEST_DATA_PATH, s3_object_name + ) + log.info(f"s3 object path: {s3_object_path}") + if 
+                                            reusable.upload_version_object(
+                                                config,
+                                                each_user,
+                                                rgw_conn,
+                                                s3_object_name,
+                                                config.obj_size,
+                                                bkt,
+                                                TEST_DATA_PATH,
+                                            )
+                                        else:
+                                            log.info("upload type: normal")
+                                            reusable.upload_object(
+                                                s3_object_name,
+                                                bkt,
+                                                TEST_DATA_PATH,
+                                                config,
+                                                each_user,
+                                            )
+
+                                    reusable.verify_object_sync_on_other_site(
+                                        rgw_ssh_con, bkt, config
+                                    )
+
                                 if config.test_ops["pipe_remove"]:
                                     pipe_id = reusable.pipe_operation(
                                         group_id1,
                                         "remove",
                                         zone_names,
-                                        bucket_name=bucket_name_to_create,
+                                        bucket_name=bkt.name,
+                                    )
+
+                                if config.test_ops["group_remove"]:
+                                    pipe_id = reusable.group_operation(
+                                        group_id1,
+                                        "remove",
+                                        group_status,
+                                        bucket_name=bkt.name,
+                                    )
+
+                        else:
+                            if config.test_ops.get("create_object", False):
+                                # uploading data
+                                log.info(
+                                    f"s3 objects to create: {config.objects_count}"
+                                )
+                                for oc, size in list(config.mapped_sizes.items()):
+                                    config.obj_size = size
+                                    s3_object_name = utils.gen_s3_object_name(
+                                        bkt.name, oc
                                     )
-                                if config.test_ops["flow_remove"]:
-                                    flow_type = config.test_ops["flow_type"]
-                                    zone_names = reusable.flow_operation(
-                                        group_id1, "remove", flow_type
+                                    log.info(f"s3 object name: {s3_object_name}")
+                                    s3_object_path = os.path.join(
+                                        TEST_DATA_PATH, s3_object_name
                                     )
-                                if config.test_ops["group_remove"]:
-                                    group_status = config.test_ops["group_status"]
-                                    group_id = reusable.group_operation(
-                                        group_id1, "remove", group_status
+                                    log.info(f"s3 object path: {s3_object_path}")
+                                    if config.test_ops.get("enable_version", False):
+                                        reusable.upload_version_object(
+                                            config,
+                                            each_user,
+                                            rgw_conn,
+                                            s3_object_name,
+                                            config.obj_size,
+                                            bkt,
+                                            TEST_DATA_PATH,
+                                        )
+                                    else:
+                                        log.info("upload type: normal")
+                                        reusable.upload_object(
+                                            s3_object_name,
+                                            bkt,
+                                            TEST_DATA_PATH,
+                                            config,
+                                            each_user,
+                                        )
+
+                                    reusable.verify_object_sync_on_other_site(
+                                        rgw_ssh_con, bkt, config
                                     )
+
+    if config.test_ops["pipe_remove"]:
+        pipe_id = reusable.pipe_operation(group_id, "remove", zone_names)
+
+    if config.test_ops["flow_remove"]:
+        flow_type = config.test_ops["flow_type"]
+        zone_names = reusable.flow_operation(group_id, "remove", flow_type)
+
+    if config.test_ops["group_remove"]:
+        group_id = reusable.group_operation(group_id, "remove", group_status)
+    if config.test_ops.get("group_transition", False):
+        reusable.group_operation(group_id2, "remove", group_status)
+    utils.exec_shell_cmd(f"radosgw-admin period update --commit")
+
     # check for any crashes during the execution
     crash_info = reusable.check_for_crash()
     if crash_info:
@@ -174,7 +295,7 @@
         test_data_dir = "test_data"
         rgw_service = RGWService()
         TEST_DATA_PATH = os.path.join(project_dir, test_data_dir)
-        log.info("TEST_DATA_PATH: %s" % TEST_DATA_PATH)
+        log.info(f"TEST_DATA_PATH: {TEST_DATA_PATH}")
         if not os.path.exists(TEST_DATA_PATH):
            log.info("test data dir not exists, creating.. ")
") os.makedirs(TEST_DATA_PATH) @@ -200,6 +321,8 @@ def test_exec(config, ssh_con): config = Config(yaml_file) ceph_conf = CephConfOp(ssh_con) config.read(ssh_con) + if config.mapped_sizes is None: + config.mapped_sizes = utils.make_mapped_sizes(config) test_exec(config, ssh_con) test_info.success_status("test passed") sys.exit(0) @@ -209,3 +332,6 @@ def test_exec(config, ssh_con): log.error(traceback.format_exc()) test_info.failed_status("test failed") sys.exit(1) + + finally: + utils.cleanup_test_data_path(TEST_DATA_PATH) diff --git a/rgw/v2/tests/s3_swift/test_multisite_syncpolicy_prefix_tag.py b/rgw/v2/tests/s3_swift/test_multisite_syncpolicy_prefix_tag.py index 5f4070ca3..60b529856 100644 --- a/rgw/v2/tests/s3_swift/test_multisite_syncpolicy_prefix_tag.py +++ b/rgw/v2/tests/s3_swift/test_multisite_syncpolicy_prefix_tag.py @@ -3,7 +3,7 @@ Note: Following yaml can be used - configs/test_syncpolicy_prefix_tag.yaml + test_multisite_syncpolicy_prefix_tag.yaml Operation: a. On a MS setup, create sync policy with configuration to sync objects having a prefix or tag or both. @@ -53,24 +53,63 @@ def test_exec(config, ssh_con): reusable.flow_operation(group_id, "create") reusable.pipe_operation(group_id, "create") + period_details = json.loads(utils.exec_shell_cmd("radosgw-admin period get")) + zone_list = json.loads(utils.exec_shell_cmd("radosgw-admin zone list")) + for zone in period_details["period_map"]["zonegroups"][0]["zones"]: + if zone["name"] not in zone_list["zones"]: + rgw_nodes = zone["endpoints"][0].split(":") + node_rgw = rgw_nodes[1].split("//")[-1] + log.info(f"Another site is: {zone['name']} and ip {node_rgw}") + break + rgw_ssh_con = utils.connect_remote(node_rgw) + + prefix = "foo" + tag = "colour=red" + for user in user_info: auth = Auth(user, ssh_con, ssl=config.ssl) rgw_conn = auth.do_auth() + buckets = [] for bc in range(config.bucket_count): bucket_name = utils.gen_bucket_name_from_userid(user["user_id"], rand_no=bc) bucket = reusable.create_bucket(bucket_name, rgw_conn, user) log.info(f"Bucket {bucket_name} created") - prefix = "foo" - tag = "colour=red" + _, stdout, _ = rgw_ssh_con.exec_command("radosgw-admin bucket list") + cmd_output = json.loads(stdout.read().decode()) + log.info(f"bucket list response on another site is: {cmd_output}") + if bucket.name not in cmd_output: + log.info( + f"bucket {bucket.name} did not sync another site, sleep 60s and retry" + ) + for retry_count in range(20): + time.sleep(60) + _, re_stdout, _ = rgw_ssh_con.exec_command( + "radosgw-admin bucket list" + ) + re_cmd_output = json.loads(re_stdout.read().decode()) + if bucket.name not in re_cmd_output: + log.info( + f"bucket {bucket.name} not synced to other site after 60s: {re_cmd_output}, retry" + ) + else: + log.info(f"bucket {bucket.name} found on other site") + break + + if (retry_count > 20) and (len(re_cmd_output["groups"]) == 0): + raise TestExecError( + f"bucket {bucket.name} did not sync to other site even after 20m" + ) + buckets.append(bucket) + + group_info = reusable.get_sync_policy() + if group_info["groups"][0]["status"] != "allowed": + reusable.group_operation(group_id, "modify", "allowed") + + for bkt in buckets: # Create bucket sync policy - group_id1 = "group-" + bucket_name - reusable.group_operation( - group_id1, - "create", - "enabled", - bucket_name, - ) + group_id1 = "group-" + bkt.name + reusable.group_operation(group_id1, "create", "enabled", bkt.name) detail = "" if config.test_ops["has_prefix"]: detail = f"{detail} --prefix={prefix}" @@ -80,17 +119,17 @@ def 
             pipe_id = reusable.pipe_operation(
                 group_id1,
                 "create",
-                bucket_name=bucket_name,
+                bucket_name=bkt.name,
                 policy_detail=detail,
             )
-            time.sleep(30)
+            reusable.verify_bucket_sync_policy_on_other_site(rgw_ssh_con, bkt)
 
-            log.info(f"Creating objects on bucket: {bucket_name}")
+            log.info(f"Creating objects on bucket: {bkt.name}")
             log.info("s3 objects to create: %s" % config.objects_count)
             for oc, size in list(config.mapped_sizes.items()):
                 config.obj_size = size
-                s3_object_name = utils.gen_s3_object_name(bucket_name, oc)
+                s3_object_name = utils.gen_s3_object_name(bkt.name, oc)
                 if config.test_ops["has_prefix"]:
                     # adding prefix
                     s3_object_name = prefix + s3_object_name
@@ -102,7 +141,7 @@ def test_exec(config, ssh_con):
                     log.info("upload type: tagged")
                     reusable.upload_object_with_tagging(
                         s3_object_name,
-                        bucket,
+                        bkt,
                         TEST_DATA_PATH,
                         config,
                         user,
@@ -111,17 +150,22 @@ def test_exec(config, ssh_con):
                 else:
                     log.info("upload type: normal")
                     reusable.upload_object(
-                        s3_object_name, bucket, TEST_DATA_PATH, config, user
+                        s3_object_name, bkt, TEST_DATA_PATH, config, user
                     )
 
+            reusable.verify_object_sync_on_other_site(rgw_ssh_con, bkt, config)
+
     # check for any crashes during the execution
+    group_id = reusable.group_operation(group_id, "remove")
+    utils.exec_shell_cmd(f"radosgw-admin period update --commit")
+
     crash_info = reusable.check_for_crash()
     if crash_info:
         raise TestExecError("ceph daemon crash found!")
 
 
 if __name__ == "__main__":
-    test_info = AddTestInfo("test bucket creation through awscli")
+    test_info = AddTestInfo("test granular sync policy with prefix and tag")
 
     try:
         project_dir = os.path.abspath(os.path.join(__file__, "../../.."))
@@ -131,9 +175,11 @@ def test_exec(config, ssh_con):
         if not os.path.exists(TEST_DATA_PATH):
             log.info("test data dir not exists, creating.. ")
             os.makedirs(TEST_DATA_PATH)
-        parser = argparse.ArgumentParser(description="RGW S3 bucket creation using AWS")
+        parser = argparse.ArgumentParser(
+            description="RGW granular sync policy with prefix and tag"
+        )
         parser.add_argument(
-            "-c", dest="config", help="RGW S3 bucket creation using AWS"
+            "-c", dest="config", help="RGW granular sync policy with prefix and tag"
         )
         parser.add_argument(
             "-log_level",