Skip to content

Commit

Permalink
Merge pull request #574 from ckulal/lc_process
Browse files Browse the repository at this point in the history
[RGW-T2-Automation]: Test LC process (perform radosgw-admin lc proces…
  • Loading branch information
mergify[bot] authored Mar 21, 2024
2 parents 2a5a8cc + d69045c commit 59d3ab6
Show file tree
Hide file tree
Showing 3 changed files with 176 additions and 72 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#test_bucket_lifecycle_object_expiration_transition.py
# Polarion: CEPH-83574044: Test LC process (perform radosgw-admin lc process) with and without applying LC rule.
# Scenario: run a manual `radosgw-admin lc process --bucket <name>` on a bucket
# that has NO lifecycle rule applied, and verify the command does not crash
# (the driver checks stderr for "Segmentation fault").
config:
user_count: 1
bucket_count: 1
# Single object is enough: this scenario only checks the lc process command itself.
objects_count: 1
parallel_lc: False
# Shortened LC evaluation interval for testing (seconds).
rgw_lc_debug_interval: 600
# Background LC threads disabled so any transition can only come from a manual
# `lc process` invocation.
rgw_enable_lc_threads: false
test_lc_transition: True
enable_resharding: False
# Target pool / storage class for transitions -- presumably created by the
# suite setup; TODO confirm against the test harness.
pool_name: data.cold
storage_class: cold
ec_pool_transition: False
multiple_transitions: False
two_pool_transition: False
objects_size_range:
min: 5
max: 15
test_ops:
create_bucket: true
create_object: true
enable_versioning: true
version_count: 1
delete_marker: false
# Drives the "lc process WITHOUT applying any rule" branch in test_exec.
transition_with_lc_process_without_rule: true
# NOTE(review): rule below is intentionally incomplete (no Status/Transitions)
# and is never put on the bucket in this scenario; kept so the config schema
# parses -- verify against the test driver.
lifecycle_conf:
- ID: LC_Rule_1
Filter:
Prefix: key1
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
#test_bucket_lifecycle_object_expiration_transition.py
# Polarion: CEPH-83574044: Test LC process (perform radosgw-admin lc process) with and without applying LC rule.
# Scenario: apply the LC rule below to the bucket, then trigger the transition
# via a manual `radosgw-admin lc process` run (LC background threads disabled),
# and validate objects moved to the `cold` storage class.
config:
user_count: 1
bucket_count: 1
objects_count: 10
parallel_lc: False
# Shortened LC evaluation interval for testing (seconds).
rgw_lc_debug_interval: 600
# Background LC threads disabled: the transition must be driven solely by the
# manual `lc process` command.
rgw_enable_lc_threads: false
test_lc_transition: True
enable_resharding: False
# Target pool / storage class for the transition -- presumably created by the
# suite setup; TODO confirm against the test harness.
pool_name: data.cold
storage_class: cold
ec_pool_transition: False
multiple_transitions: False
two_pool_transition: False
objects_size_range:
min: 5
max: 15
test_ops:
create_bucket: true
create_object: true
enable_versioning: true
version_count: 1
delete_marker: false
# Drives the "lc process WITH an applied rule" branch in test_exec.
transition_with_lc_process: true
lifecycle_conf:
- ID: LC_Rule_1
Filter:
Prefix: key1
Status: Enabled
Transitions:
# Past date: every matching object is immediately eligible for transition,
# so a single `lc process` run should move them all.
- Date: "2022-02-19"
StorageClass: cold
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
test_lc_rule_conflict_transition_actions.yaml
test_lc_rule_reverse_transition.yaml
test_lc_with_custom_worktime.yaml
test_lc_process_without_applying_rule.yaml
test_lc_transition_with_lc_process.yaml
Operation:
Expand Down Expand Up @@ -219,92 +221,130 @@ def test_exec(config, ssh_con):
reusable.verify_attrs_after_resharding(bucket)

if not config.parallel_lc:
life_cycle_rule = {"Rules": config.lifecycle_conf}
reusable.put_get_bucket_lifecycle_test(
bucket,
rgw_conn,
rgw_conn2,
life_cycle_rule,
config,
upload_start_time,
upload_end_time,
)
if config.test_ops.get("reverse_transition", False):
log.info(f"verifying lc reverse transition")
rule1_lc_seconds = (
config.rgw_lc_debug_interval
* config.test_ops.get("actual_lc_days")
)
rule1_lc_timestamp = upload_end_time + 60 + rule1_lc_seconds
expected_storage_class = config.storage_class
config.test_ops[
"expected_storage_class"
] = expected_storage_class
lc_ops.validate_prefix_rule(bucket, config)

rule2_lc_seconds = (
config.rgw_lc_debug_interval
* config.test_ops.get("rule2_lc_days")
)
rule2_lc_timestamp = rule1_lc_timestamp + rule2_lc_seconds
if config.test_ops.get(
"transition_with_lc_process_without_rule", False
):
log.info(
f"sleeping till {datetime.fromtimestamp(rule2_lc_timestamp)} before verifying lc transition rule2"
f"perform LC transition with lc process command without applying any rule"
)
while time.time() < rule2_lc_timestamp:
log.info(
f"current time: {datetime.fromtimestamp(time.time())}"
)
time.sleep(5)
expected_storage_class = config.second_storage_class
config.test_ops[
"expected_storage_class"
] = expected_storage_class
lc_ops.validate_prefix_rule(bucket, config)

rule3_lc_seconds = (
config.rgw_lc_debug_interval
* config.test_ops.get("rule3_lc_days")
cmd = f"radosgw-admin lc process --bucket {bucket_name}"
err = utils.exec_shell_cmd(
cmd, debug_info=True, return_err=True
)
rule3_lc_timestamp = rule2_lc_timestamp + rule3_lc_seconds
log.info(
f"sleeping till {datetime.fromtimestamp(rule3_lc_timestamp)} before verifying lc transition rule3"
log.info(f"ERROR: {err}")
if "Segmentation fault" in err:
raise TestExecError("Segmentation fault occured")

elif config.test_ops.get("transition_with_lc_process", False):
log.info(f"perform LC transition with lc process command")
life_cycle_rule = {"Rules": config.lifecycle_conf}
reusable.put_bucket_lifecycle(
bucket,
rgw_conn,
rgw_conn2,
life_cycle_rule,
)
while time.time() < rule3_lc_timestamp:
log.info(
f"current time: {datetime.fromtimestamp(time.time())}"
)
time.sleep(5)
expected_storage_class = config.storage_class
config.test_ops[
"expected_storage_class"
] = expected_storage_class
lc_ops.validate_prefix_rule(bucket, config)
else:
cmd = f"radosgw-admin lc process --bucket {bucket_name}"
out = utils.exec_shell_cmd(cmd)
cmd = f"radosgw-admin lc list"
lc_list = json.loads(utils.exec_shell_cmd(cmd))
for data in lc_list:
if data["bucket"] == bucket_name:
if data["status"] == "UNINITIAL":
raise TestExecError(
f"Even if rgw_enable_lc_threads set to false manual lc process for bucket"
f"{bucket_name} should work"
)
log.info("sleeping for 30 seconds")
time.sleep(30)
lc_ops.validate_prefix_rule(bucket, config)

if config.test_ops["delete_marker"] is True:
life_cycle_rule_new = {"Rules": config.delete_marker_ops}
else:
life_cycle_rule = {"Rules": config.lifecycle_conf}
reusable.put_get_bucket_lifecycle_test(
bucket,
rgw_conn,
rgw_conn2,
life_cycle_rule_new,
life_cycle_rule,
config,
upload_start_time,
upload_end_time,
)
if config.multiple_delete_marker_check:
log.info(
f"verification of TC: Not more than 1 delete marker is created for objects deleted many times using LC"
)
time.sleep(60)
cmd = f"radosgw-admin bucket list --bucket {bucket.name}| grep delete-marker | wc -l"
out = utils.exec_shell_cmd(cmd)
del_marker_count = out.split("\n")[0]
if int(del_marker_count) != int(config.objects_count):
raise AssertionError(
f"more than one delete marker created for the objects in the bucket {bucket.name}"
if config.test_ops.get("reverse_transition", False):
log.info(f"verifying lc reverse transition")
rule1_lc_seconds = (
config.rgw_lc_debug_interval
* config.test_ops.get("actual_lc_days")
)
rule1_lc_timestamp = upload_end_time + 60 + rule1_lc_seconds
expected_storage_class = config.storage_class
config.test_ops[
"expected_storage_class"
] = expected_storage_class
lc_ops.validate_prefix_rule(bucket, config)

rule2_lc_seconds = (
config.rgw_lc_debug_interval
* config.test_ops.get("rule2_lc_days")
)
rule2_lc_timestamp = rule1_lc_timestamp + rule2_lc_seconds
log.info(
f"sleeping till {datetime.fromtimestamp(rule2_lc_timestamp)} before verifying lc transition rule2"
)
while time.time() < rule2_lc_timestamp:
log.info(
f"current time: {datetime.fromtimestamp(time.time())}"
)
time.sleep(5)
expected_storage_class = config.second_storage_class
config.test_ops[
"expected_storage_class"
] = expected_storage_class
lc_ops.validate_prefix_rule(bucket, config)

rule3_lc_seconds = (
config.rgw_lc_debug_interval
* config.test_ops.get("rule3_lc_days")
)
rule3_lc_timestamp = rule2_lc_timestamp + rule3_lc_seconds
log.info(
f"sleeping till {datetime.fromtimestamp(rule3_lc_timestamp)} before verifying lc transition rule3"
)
while time.time() < rule3_lc_timestamp:
log.info(
f"current time: {datetime.fromtimestamp(time.time())}"
)
time.sleep(5)
expected_storage_class = config.storage_class
config.test_ops[
"expected_storage_class"
] = expected_storage_class
lc_ops.validate_prefix_rule(bucket, config)
else:
log.info("sleeping for 30 seconds")
time.sleep(30)
lc_ops.validate_prefix_rule(bucket, config)

if config.test_ops["delete_marker"] is True:
life_cycle_rule_new = {"Rules": config.delete_marker_ops}
reusable.put_get_bucket_lifecycle_test(
bucket,
rgw_conn,
rgw_conn2,
life_cycle_rule_new,
config,
)
if config.multiple_delete_marker_check:
log.info(
f"verification of TC: Not more than 1 delete marker is created for objects deleted many times using LC"
)
time.sleep(60)
cmd = f"radosgw-admin bucket list --bucket {bucket.name}| grep delete-marker | wc -l"
out = utils.exec_shell_cmd(cmd)
del_marker_count = out.split("\n")[0]
if int(del_marker_count) != int(config.objects_count):
raise AssertionError(
f"more than one delete marker created for the objects in the bucket {bucket.name}"
)
else:
buckets.append(bucket)

Expand Down

0 comments on commit 59d3ab6

Please sign in to comment.