[Tier-3] Bucket Granular sync policy with storage class
Signed-off-by: Anuchaithra <[email protected]>
anrao19 committed Jun 4, 2024
1 parent 585647d commit 144c3e6
Showing 4 changed files with 155 additions and 2 deletions.
34 changes: 34 additions & 0 deletions rgw/v2/tests/s3_swift/multisite_configs/test_bucket_granularsync_storage_class_direc.yaml
@@ -0,0 +1,34 @@
# Polarion TC: CEPH-83575143
# script: test_s3cmd_bucket_granular_sync_policy.py
config:
  user_count: 1
  bucket_count: 1
  objects_count: 100
  pool_name: data.cold
  storage_class: cold
  objects_size_range:
    min: 5K
    max: 2M
  test_ops:
    dest_param_storage_class: true
    zonegroup_group: true
    zonegroup_status: allowed
    zonegroup_flow: true
    zonegroup_flow_type: directional
    zonegroup_source_zone: primary
    zonegroup_dest_zone: secondary
    zonegroup_source_zones: primary
    zonegroup_dest_zones: secondary
    zonegroup_pipe: true
    bucket_group: true
    bucket_status: enabled
    bucket_flow: false
    bucket_pipe: true
    bucket_source_zones: primary
    bucket_dest_zones: secondary
    bucket_policy_details: --storage-class cold
    create_object: true
    create_bucket: true
    should_sync: true
    write_io_verify_another_site: true
    zonegroup_group_remove: true
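For orientation, the test_ops above are driven through the radosgw-admin sync-policy CLI: a zonegroup-level group with a directional flow from primary to secondary, plus a bucket-level pipe whose destination parameter comes from bucket_policy_details. A minimal sketch of the equivalent command sequence follows; the group, flow, pipe, and bucket names are illustrative, and subprocess stands in for the framework's shell helper.

import subprocess

cmds = [
    # zonegroup-level policy: status "allowed", directional flow primary -> secondary
    "radosgw-admin sync group create --group-id=group1 --status=allowed",
    "radosgw-admin sync group flow create --group-id=group1 --flow-id=flow1"
    " --flow-type=directional --source-zone=primary --dest-zone=secondary",
    "radosgw-admin sync group pipe create --group-id=group1 --pipe-id=pipe1"
    " --source-zones=primary --dest-zones=secondary",
    "radosgw-admin period update --commit",
    # bucket-level policy: status "enabled"; the pipe rewrites the destination
    # storage class, which is where "--storage-class cold" above lands
    "radosgw-admin sync group create --bucket=bkt1 --group-id=bgroup1 --status=enabled",
    "radosgw-admin sync group pipe create --bucket=bkt1 --group-id=bgroup1"
    " --pipe-id=bpipe1 --source-zones=primary --dest-zones=secondary --storage-class cold",
]
for cmd in cmds:
    subprocess.run(cmd, shell=True, check=True)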
29 changes: 29 additions & 0 deletions rgw/v2/tests/s3_swift/multisite_configs/test_bucket_granularsync_storage_class_symm.yaml
@@ -0,0 +1,29 @@
# Polarion TC: CEPH-83575143
# script: test_s3cmd_bucket_granular_sync_policy.py
config:
  user_count: 1
  bucket_count: 1
  objects_count: 100
  pool_name: data.glacier
  storage_class: glacier
  objects_size_range:
    min: 5K
    max: 2M
  test_ops:
    dest_param_storage_class: true
    zonegroup_group: true
    zonegroup_status: allowed
    zonegroup_flow: true
    zonegroup_flow_type: symmetrical
    zonegroup_pipe: true
    bucket_group: true
    bucket_status: enabled
    bucket_flow: false
    bucket_pipe: true
    bucket_policy_details: --storage-class glacier
    create_object: true
    create_bucket: true
    should_sync: true
    write_io_verify_another_site: true
    write_io_verify_should_sync: true
    zonegroup_group_remove: true
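Either variant can be sanity-checked from the destination site by asking RGW for the policy it actually resolves for a bucket. A small standalone sketch, assuming radosgw-admin is on PATH and using an illustrative bucket name:

import json
import subprocess

def resolved_sync_info(bucket):
    # `radosgw-admin sync info --bucket <name>` prints JSON describing the
    # effective source/dest pipes after zonegroup and bucket policies combine
    out = subprocess.run(
        f"radosgw-admin sync info --bucket {bucket}",
        shell=True, check=True, capture_output=True, text=True,
    ).stdout
    return json.loads(out)

info = resolved_sync_info("bkt1")
print(info.get("sources", []), info.get("dests", []))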
41 changes: 41 additions & 0 deletions rgw/v2/tests/s3_swift/reusable.py
@@ -2399,3 +2399,44 @@ def put_get_bucket_encryption(rgw_s3_client, bucket_name, config):
    # get bucket encryption
    log.info(f"get bucket encryption for bucket : {bucket_name}")
    sse_s3.get_bucket_encryption(rgw_s3_client, bucket_name)


def create_storage_class_in_all_zones(current_zone, rgw_ssh_con, config):
    """
    Set up the prerequisites for object sync with a bucket granular sync
    policy: create the storage class placement target and its backing data
    pool in every zone, then commit the period.
    """
    pool_name = config.pool_name
    storage_class = config.storage_class
    zone_names, _ = get_multisite_info()
    log.info(f"zones available are: {zone_names}")

    # scrape the zonegroup name from the `sync status` header,
    # e.g. "zonegroup 28a2b3cf (shared)" -> "shared"
    op = utils.exec_shell_cmd("radosgw-admin sync status")
    for line in op.split("\n"):
        if "zonegroup" in line:
            zonegroup = line[line.find("(") + 1 : line.find(")")]
            break

    for zone in zone_names:
        if zone == current_zone:
            # current zone: run the commands locally
            utils.exec_shell_cmd(
                f"radosgw-admin zonegroup placement add --rgw-zonegroup {zonegroup} --placement-id default-placement --storage-class {storage_class}"
            )
            utils.exec_shell_cmd(
                f"radosgw-admin zone placement add --rgw-zone {zone} --placement-id default-placement --storage-class {storage_class} --data-pool {pool_name}"
            )
            utils.exec_shell_cmd(f"ceph osd pool create {pool_name}")
            utils.exec_shell_cmd(f"ceph osd pool application enable {pool_name} rgw")
            utils.exec_shell_cmd("radosgw-admin period update --commit")
        else:
            # other zones: run the same commands over the ssh connection
            rgw_ssh_con.exec_command(
                f"radosgw-admin zonegroup placement add --rgw-zonegroup {zonegroup} --placement-id default-placement --storage-class {storage_class}"
            )
            rgw_ssh_con.exec_command(
                f"radosgw-admin zone placement add --rgw-zone {zone} --placement-id default-placement --storage-class {storage_class} --data-pool {pool_name}"
            )
            rgw_ssh_con.exec_command(f"ceph osd pool create {pool_name}")
            rgw_ssh_con.exec_command(f"ceph osd pool application enable {pool_name} rgw")
            rgw_ssh_con.exec_command("radosgw-admin period update --commit")
53 changes: 51 additions & 2 deletions rgw/v2/tests/s3_swift/test_s3cmd_bucket_granular_sync_policy.py
@@ -14,6 +14,8 @@
     multisite_configs/test_multisite_granular_bucketsync_sync_from_diff_bucket.yaml
     multisite_configs/test_multisite_granular_bucketsync_archive_symmetrical.yaml
     multisite_configs/test_multisite_granular_bucketsync_archive_directional.yaml
+    multisite_configs/test_bucket_granularsync_storage_class_symm.yaml
+    multisite_configs/test_bucket_granularsync_storage_class_direc.yaml
 Operation:
     Creates/deletes sync policy group at bucket and zonegroup level
@@ -110,6 +112,9 @@ def test_exec(config, ssh_con):
             reusable.verify_bucket_sync_on_other_site(rgw_ssh_con, bucket)
             buckets.append(bucket)
 
+        if config.test_ops.get("dest_param_storage_class", False):
+            reusable.create_storage_class_in_all_zones(zone["name"], rgw_ssh_con, config)
+
         if utils.is_cluster_multisite():
             if config.test_ops.get("zonegroup_group", False):
                 group_status = config.test_ops["zonegroup_status"]
@@ -202,11 +207,14 @@ def test_exec(config, ssh_con):
             bucket_details = config.test_ops.get(
                 "bucket_policy_details", None
             )
+            if bucket_details is not None:
+                bucket_details = " " + bucket_details
+
             pipe_id = None
             if config.test_ops.get(
                 "sync_to_diff_bucket", False
             ):
-                bucket_details = " " + bucket_details.replace(
+                bucket_details = bucket_details.replace(
                     "<dest_bucket_name>",
                     f"{bkt.name}-new-{pipec}",
                 )
@@ -215,7 +223,7 @@
             if config.test_ops.get(
                 "sync_from_diff_bucket", False
             ):
-                bucket_details = " " + bucket_details.replace(
+                bucket_details = bucket_details.replace(
                     "<source_bucket_name>", old_bucket
                 )
 
@@ -289,6 +297,25 @@ def test_exec(config, ssh_con):
             reusable.verify_object_sync_on_other_site(
                 rgw_ssh_con, bkt, config
             )
+
+            if config.test_ops.get("dest_param_storage_class", False):
+                log.info(
+                    f"Start the validation of object sync in destination with storage class {config.storage_class}"
+                )
+                _, out, _ = rgw_ssh_con.exec_command(
+                    f"radosgw-admin bucket list --bucket {bkt.name}"
+                )
+                bkt_list = json.loads(out.read().decode())
+                for obj in bkt_list:
+                    if obj["name"].startswith(f"key_{bkt.name}_"):
+                        if (
+                            obj["meta"]["storage_class"]
+                            != config.storage_class
+                        ):
+                            raise TestExecError(
+                                f"object synced to non-master site for bucket {bkt.name} does not belong to storage class {config.storage_class}"
+                            )
+
         else:
             time.sleep(1200)
             _, stdout, _ = rgw_ssh_con.exec_command(
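The storage-class assertion added here reappears almost verbatim in the reverse-direction check later in this diff. Factored out, it might look like the sketch below; the helper name is hypothetical, a generic RuntimeError stands in for the framework's TestExecError, and bkt_list is the parsed JSON from radosgw-admin bucket list --bucket <name>.

def assert_objects_in_storage_class(bkt_list, key_prefix, expected, bucket_name):
    # every object whose key matches the prefix must carry the expected
    # storage class in its bucket-index metadata
    for obj in bkt_list:
        if not obj["name"].startswith(key_prefix):
            continue
        actual = obj["meta"]["storage_class"]
        if actual != expected:
            raise RuntimeError(
                f"object {obj['name']} in bucket {bucket_name} has storage "
                f"class {actual}, expected {expected}"
            )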
@@ -478,6 +505,28 @@ def test_exec(config, ssh_con):
f"Object synced for bucket {bkt.name}, on another site as expected"
)

if config.test_ops.get(
"dest_param_storage_class", False
):
log.info(
f"Start the validation of object sync in destination with staorage class {config.storage_class}")
bkt_list = json.loads(
utils.exec_shell_cmd(
f"radosgw-admin bucket list --bucket {bkt.name}"
)
)
for obj in bkt_list:
if obj["name"].startswith(
f"new-key_{bkt.name}_"
):
if (
obj["meta"]["storage_class"]
!= config.storage_class
):
raise TestExecError(
f"object synced to master for bucket {bkt.name}, does not belong to storage class {config.storage_class}"
)

elif config.test_ops.get(
"sync_to_diff_bucket", False
) or config.test_ops.get("sync_from_diff_bucket", False):
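For reference, both validation blocks read exactly two fields from each bucket-index entry that radosgw-admin bucket list --bucket <name> returns. A trimmed entry of the assumed shape, with made-up values:

entry = {
    "name": "key_bkt1_0",
    "instance": "",
    "meta": {
        "size": 5120,
        "etag": "0f343b0931126a20f133d67c2b018a3b",
        "storage_class": "cold",
        "owner": "user1",
    },
}
# the checks above reduce to these two lookups
assert entry["name"].startswith("key_bkt1_")
assert entry["meta"]["storage_class"] == "cold"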
