Merge pull request #598 from anrao19/CEPH-83575143
[Tier-3] Bucket Granular sync policy with storage class
mergify[bot] authored Jun 11, 2024
2 parents 30e9edd + 319686a commit b41f052
Showing 4 changed files with 270 additions and 5 deletions.
34 changes: 34 additions & 0 deletions rgw/v2/tests/s3_swift/multisite_configs/test_bucket_granularsync_storage_class_direc.yaml
@@ -0,0 +1,34 @@
# Polarion TC : CEPH-83575143
# script: test_s3cmd_bucket_granular_sync_policy.py
config:
  user_count: 1
  bucket_count: 1
  objects_count: 100
  pool_name: data.cold
  storage_class: cold
  objects_size_range:
    min: 5K
    max: 2M
  test_ops:
    dest_param_storage_class: true
    zonegroup_group: true
    zonegroup_status: allowed
    zonegroup_flow: true
    zonegroup_flow_type: directional
    zonegroup_source_zone: primary
    zonegroup_dest_zone: secondary
    zonegroup_source_zones: primary
    zonegroup_dest_zones: secondary
    zonegroup_pipe: true
    bucket_group: true
    bucket_status: enabled
    bucket_flow: false
    bucket_pipe: true
    bucket_source_zones: primary
    bucket_dest_zones: secondary
    bucket_policy_details: --storage-class cold
    create_object: true
    create_bucket: true
    should_sync: true
    write_io_verify_another_site: true
    zonegroup_group_remove: true
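
For context, a minimal sketch of the radosgw-admin sequence a directional config like this is expected to exercise, following the upstream bucket granular sync policy workflow. The group/flow/pipe IDs and the bucket name are illustrative assumptions, not values taken from the test.

```python
# Sketch only: IDs ("group1", "flow1", "pipe1") and bucket "bkt1" are assumed.
import subprocess


def radosgw_admin(cmd: str) -> str:
    """Run a radosgw-admin command and return its stdout."""
    return subprocess.run(
        cmd, shell=True, check=True, capture_output=True, text=True
    ).stdout


# Zonegroup-level group with a directional flow from primary to secondary.
radosgw_admin("radosgw-admin sync group create --group-id=group1 --status=allowed")
radosgw_admin(
    "radosgw-admin sync group flow create --group-id=group1 --flow-id=flow1 "
    "--flow-type=directional --source-zone=primary --dest-zone=secondary"
)
radosgw_admin(
    "radosgw-admin sync group pipe create --group-id=group1 --pipe-id=pipe1 "
    "--source-zones='*' --source-bucket='*' --dest-zones='*' --dest-bucket='*'"
)

# Bucket-level group whose pipe carries the --storage-class destination param,
# so synced objects land in the "cold" storage class on the destination zone.
radosgw_admin(
    "radosgw-admin sync group create --bucket=bkt1 --group-id=group1 --status=enabled"
)
radosgw_admin(
    "radosgw-admin sync group pipe create --bucket=bkt1 --group-id=group1 "
    "--pipe-id=pipe1 --source-zones=primary --dest-zones=secondary "
    "--storage-class=cold"
)

# Zonegroup-level policy changes take effect only after a period commit.
radosgw_admin("radosgw-admin period update --commit")
```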
29 changes: 29 additions & 0 deletions rgw/v2/tests/s3_swift/multisite_configs/test_bucket_granularsync_storage_class_symm.yaml
@@ -0,0 +1,29 @@
# Polarion TC : CEPH-83575143
# script: test_s3cmd_bucket_granular_sync_policy.py
config:
  user_count: 1
  bucket_count: 1
  objects_count: 100
  pool_name: data.glacier
  storage_class: glacier
  objects_size_range:
    min: 5K
    max: 2M
  test_ops:
    dest_param_storage_class: true
    zonegroup_group: true
    zonegroup_status: allowed
    zonegroup_flow: true
    zonegroup_flow_type: symmetrical
    zonegroup_pipe: true
    bucket_group: true
    bucket_status: enabled
    bucket_flow: false
    bucket_pipe: true
    bucket_policy_details: --storage-class glacier
    create_object: true
    create_bucket: true
    should_sync: true
    write_io_verify_another_site: true
    write_io_verify_should_sync: true
    zonegroup_group_remove: true
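
The symmetrical variant differs mainly in the zonegroup flow: a single flow connects both zones instead of one direction. A sketch of that one command, with the same assumed IDs as above:

```python
# Sketch only: "group1"/"flow1" are assumed IDs; zones as named in the configs.
import subprocess

subprocess.run(
    "radosgw-admin sync group flow create --group-id=group1 --flow-id=flow1 "
    "--flow-type=symmetrical --zones=primary,secondary",
    shell=True,
    check=True,
)
```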
41 changes: 41 additions & 0 deletions rgw/v2/tests/s3_swift/reusable.py
@@ -2399,3 +2399,44 @@ def put_get_bucket_encryption(rgw_s3_client, bucket_name, config):
    # get bucket encryption
    log.info(f"get bucket encryption for bucket : {bucket_name}")
    sse_s3.get_bucket_encryption(rgw_s3_client, bucket_name)


def create_storage_class_in_all_zones(current_zone, rgw_ssh_con, config):
    """
    Set up the prerequisites for object sync with a bucket granular sync
    policy: add the storage class to the zonegroup and zone placement and
    create its data pool in every zone.
    """
    # Exercise the remote connection before making changes; output is unused.
    _, stdout, _ = rgw_ssh_con.exec_command("radosgw-admin bucket list")
    pool_name = config.pool_name
    storage_class = config.storage_class
    zone_names, _ = get_multisite_info()
    log.info(f"zones available are: {zone_names}")

    # Parse the zonegroup name from the parentheses in `sync status` output.
    op = utils.exec_shell_cmd("radosgw-admin sync status")
    lines = list(op.split("\n"))
    for line in lines:
        if "zonegroup" in line:
            zonegroup = line[line.find("(") + 1 : line.find(")")]
            break

    for zone in zone_names:
        if zone == current_zone:
            # Current zone: run the placement and pool commands locally.
            utils.exec_shell_cmd(
                f"radosgw-admin zonegroup placement add --rgw-zonegroup {zonegroup} --placement-id default-placement --storage-class {storage_class}"
            )
            utils.exec_shell_cmd(
                f"radosgw-admin zone placement add --rgw-zone {zone} --placement-id default-placement --storage-class {storage_class} --data-pool {pool_name}"
            )
            utils.exec_shell_cmd(f"ceph osd pool create {pool_name}")
            utils.exec_shell_cmd(f"ceph osd pool application enable {pool_name} rgw")
            utils.exec_shell_cmd("radosgw-admin period update --commit")
        else:
            # Other zones: run the same commands over the SSH connection.
            rgw_ssh_con.exec_command(
                f"radosgw-admin zonegroup placement add --rgw-zonegroup {zonegroup} --placement-id default-placement --storage-class {storage_class}"
            )
            rgw_ssh_con.exec_command(
                f"radosgw-admin zone placement add --rgw-zone {zone} --placement-id default-placement --storage-class {storage_class} --data-pool {pool_name}"
            )
            rgw_ssh_con.exec_command(f"ceph osd pool create {pool_name}")
            rgw_ssh_con.exec_command(
                f"ceph osd pool application enable {pool_name} rgw"
            )
            rgw_ssh_con.exec_command("radosgw-admin period update --commit")
171 changes: 166 additions & 5 deletions rgw/v2/tests/s3_swift/test_multisite_bucket_granular_sync_policy.py
@@ -14,6 +14,10 @@
    multisite_configs/test_multisite_granular_bucketsync_sync_from_diff_bucket.yaml
    multisite_configs/test_multisite_granular_bucketsync_archive_symmetrical.yaml
    multisite_configs/test_multisite_granular_bucketsync_archive_directional.yaml
    multisite_configs/test_bucket_granularsync_storage_class_symm.yaml
    multisite_configs/test_bucket_granularsync_storage_class_direc.yaml
    multisite_configs/test_bucket_granularsync_owner_translation_direc.yaml
    multisite_configs/test_bucket_granularsync_owner_translation_symm.yaml
Operation:
    Creates and deletes sync policy groups at bucket and zonegroup level
@@ -110,6 +114,20 @@ def test_exec(config, ssh_con):
                reusable.verify_bucket_sync_on_other_site(rgw_ssh_con, bucket)
            buckets.append(bucket)

        if config.test_ops.get("dest_param_storage_class", False):
            reusable.create_storage_class_in_all_zones(zone["name"], rgw_ssh_con, config)

        if config.test_ops.get("dest_param_owner_translation", False):
            log.info("creating new user and its owned bucket for sync destination")
            new_users_info = s3lib.create_users(config.user_count)
            auth = Auth(new_users_info[0], ssh_con, ssl=config.ssl)
            new_rgw_conn = auth.do_auth()
            new_bkt_name = utils.gen_bucket_name_from_userid(new_users_info[0]["user_id"])
            log.info(
                f"creating bucket with name: {new_bkt_name} for user {new_users_info[0]['user_id']}"
            )
            bkt_new = reusable.create_bucket(new_bkt_name, new_rgw_conn, new_users_info[0])

        if utils.is_cluster_multisite():
            if config.test_ops.get("zonegroup_group", False):
                group_status = config.test_ops["zonegroup_status"]
@@ -202,11 +220,14 @@ def test_exec(config, ssh_con):
                            bucket_details = config.test_ops.get(
                                "bucket_policy_details", None
                            )
                            if bucket_details is not None:
                                bucket_details = " " + bucket_details

                            pipe_id = None
                            if config.test_ops.get(
                                "sync_to_diff_bucket", False
                            ):
                                bucket_details = " " + bucket_details.replace(
                                bucket_details = bucket_details.replace(
                                    "<dest_bucket_name>",
                                    f"{bkt.name}-new-{pipec}",
                                )
@@ -215,10 +236,22 @@ def test_exec(config, ssh_con):
                            if config.test_ops.get(
                                "sync_from_diff_bucket", False
                            ):
                                bucket_details = " " + bucket_details.replace(
                                bucket_details = bucket_details.replace(
                                    "<source_bucket_name>", old_bucket
                                )

                            if config.test_ops.get(
                                "dest_param_owner_translation", False
                            ):
                                bucket_details = bucket_details.replace(
                                    "<dest_bucket_name>",
                                    new_bkt_name,
                                )
                                bucket_details = bucket_details.replace(
                                    "<dest_owner>",
                                    new_users_info[0]["user_id"],
                                )

                            bucket_source_pipe = config.test_ops.get(
                                "bucket_source_zones", None
                            )
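
The `<dest_bucket_name>` and `<dest_owner>` placeholders substituted above end up as destination params on the bucket pipe. A sketch of the kind of command this yields, per the upstream sync-policy options; all names here are assumed:

```python
# Sketch only: bucket, group, pipe, and user names are illustrative.
import subprocess

subprocess.run(
    "radosgw-admin sync group pipe create --bucket=src-bkt --group-id=group1 "
    "--pipe-id=pipe1 --source-zones=primary --dest-zones=secondary "
    "--dest-bucket=dest-bkt --dest-owner=new-user",
    shell=True,
    check=True,
)
```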
@@ -289,6 +322,25 @@ def test_exec(config, ssh_con):
                                reusable.verify_object_sync_on_other_site(
                                    rgw_ssh_con, bkt, config
                                )

                                if config.test_ops.get("dest_param_storage_class", False):
                                    log.info(
                                        f"Start the validation of object sync in destination with storage class {config.storage_class}"
                                    )
                                    _, out, _ = rgw_ssh_con.exec_command(
                                        f"radosgw-admin bucket list --bucket {bkt.name}"
                                    )
                                    bkt_list = json.loads(out.read().decode())
                                    for obj in bkt_list:
                                        if obj["name"].startswith(f"key_{bkt.name}_"):
                                            if (
                                                obj["meta"]["storage_class"]
                                                != config.storage_class
                                            ):
                                                raise TestExecError(
                                                    f"object synced to non-master for bucket {bkt.name}, does not belong to storage class {config.storage_class}"
                                                )

                            else:
                                time.sleep(1200)
                                _, stdout, _ = rgw_ssh_con.exec_command(
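
The storage-class validation above keys off `obj["meta"]["storage_class"]` in the `radosgw-admin bucket list --bucket` output. A minimal sketch of the entry shape it assumes; field values are invented:

```python
# Sketch only: fields besides "name" and "meta.storage_class" are made up.
sample_entry = {
    "name": "key_bkt1_0",
    "instance": "",
    "meta": {
        "size": 5120,
        "etag": "d41d8cd98f00b204e9800998ecf8427e",
        "storage_class": "cold",
    },
}


def in_expected_storage_class(entry: dict, storage_class: str) -> bool:
    """The test's predicate, restated standalone."""
    return entry["meta"].get("storage_class") == storage_class


assert in_expected_storage_class(sample_entry, "cold")
```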
@@ -389,10 +441,43 @@ def test_exec(config, ssh_con):
f"object should be sync to another site for bucket {bkt.name}, but not synced"
)

if config.test_ops.get("write_io_verify_another_site", False):
if config.test_ops.get(
"sync_to_diff_bucket", False
) or config.test_ops.get("sync_from_diff_bucket", False):
"dest_param_owner_translation", False
):
log.info(
f"Verify object sync on same site for bucket {bkt.name}"
)
bucket_stats = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket stats --bucket {bkt.name}"
)
)
bkt_objects = bucket_stats["usage"]["rgw.main"][
"num_objects"
]
if bkt_objects != config.objects_count:
raise TestExecError(
f"Did not find {config.objects_count} in bucket {bkt.name}, but found {bkt_objects}"
)

log.info(
f"object did sync on same site for bucket {bkt.name} as expected"
)
reusable.verify_object_sync_on_other_site(
rgw_ssh_con,
bkt_new,
config,
bucket_object=bkt_objects,
)

if config.test_ops.get("write_io_verify_another_site", False):
if (
config.test_ops.get("sync_to_diff_bucket", False)
or config.test_ops.get("sync_from_diff_bucket", False)
or config.test_ops.get(
"dest_param_owner_translation", False
)
):
cmd_output = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket stats --bucket {bkt.name}"
@@ -478,6 +563,29 @@ def test_exec(config, ssh_con):
f"Object synced for bucket {bkt.name}, on another site as expected"
)

if config.test_ops.get(
"dest_param_storage_class", False
):
log.info(
f"Start the validation of object sync in destination with staorage class {config.storage_class}"
)
bkt_list = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket list --bucket {bkt.name}"
)
)
for obj in bkt_list:
if obj["name"].startswith(
f"new-key_{bkt.name}_"
):
if (
obj["meta"]["storage_class"]
!= config.storage_class
):
raise TestExecError(
f"object synced to master for bucket {bkt.name}, does not belong to storage class {config.storage_class}"
)

elif config.test_ops.get(
"sync_to_diff_bucket", False
) or config.test_ops.get("sync_from_diff_bucket", False):
@@ -531,6 +639,59 @@ def test_exec(config, ssh_con):
f"Object synced for bucket {new_bkt}, on another site as expected"
)

elif config.test_ops.get(
"dest_param_owner_translation", False
):
if bkt_objects != sync_num_obj:
raise TestExecError(
f"Object should not sync in bucket {bkt.name}, but found {bkt_objects}"
)

_, stats_stdout, _ = rgw_ssh_con.exec_command(
f"radosgw-admin bucket stats --bucket {new_bkt_name}"
)
re_cmd_output = json.loads(stats_stdout.read().decode())
log.info(
f"re_cmd_output for {new_bkt_name} : {re_cmd_output}"
)
if (
re_cmd_output["usage"]["rgw.main"]["num_objects"]
!= config.objects_count
):
raise TestExecError(
f"IO performed for {bkt.name} should not sync to {new_bkt_name} in same site as of IO"
)
log.info(
f"IO did not sync to {new_bkt_name} as expected in same site as of IO"
)
log.info(
f"verify IO sync on {new_bkt_name} in another site"
)
new_bucket_stats = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket stats --bucket {new_bkt_name}"
)
)
if (
config.test_ops["zonegroup_flow_type"]
== "directional"
):
if "rgw.main" in new_bucket_stats["usage"].keys():
raise TestExecError(
f"Object did not expect to sync to bucket {new_bkt_name}, but found {new_bucket_stats['usage']['rgw.main']['num_objects']}"
)
else:
new_bkt_objects = new_bucket_stats["usage"][
"rgw.main"
]["num_objects"]
if new_bkt_objects != config.objects_count:
raise TestExecError(
f"Object did not sync in bucket {new_bkt_name}, but found {new_bkt_objects}"
)
log.info(
f"Object synced for bucket {new_bkt_name}, on another site as expected"
)

else:
if bkt_objects != config.objects_count:
raise TestExecError(
