diff --git a/ods_ci/tests/Resources/CLI/MustGather/MustGather.resource b/ods_ci/tests/Resources/CLI/MustGather/MustGather.resource index 50f506a6f..d42abee82 100644 --- a/ods_ci/tests/Resources/CLI/MustGather/MustGather.resource +++ b/ods_ci/tests/Resources/CLI/MustGather/MustGather.resource @@ -7,7 +7,7 @@ Resource ../../Common.robot *** Keywords *** -Get must-gather Logs +Get Must-Gather Logs [Documentation] Runs the must-gather image and obtains the ODH/RHOAI logs ${output}= Run process tests/Resources/CLI/MustGather/get-must-gather-logs.sh shell=yes Should Be Equal As Integers ${output.rc} 0 @@ -27,6 +27,6 @@ Verify Logs For ${namespace} ${log_files}= Run find ${namespaces_log_dir}/${namespace}/pods -type f -name "*.log" Should Not Be Equal ${log_files} ${EMPTY} -Cleanup must-gather Logs +Cleanup Must-Gather Logs [Documentation] Deletes the folder with the must-gather logs Run Keyword If "${must_gather_dir}" != "${EMPTY}" Remove Directory ${must_gather_dir} recursive=True diff --git a/ods_ci/tests/Tests/0100__platform/0103__must_gather/test-must-gather-logs.robot b/ods_ci/tests/Tests/0100__platform/0103__must_gather/test-must-gather-logs.robot index 29388c627..61712deed 100644 --- a/ods_ci/tests/Tests/0100__platform/0103__must_gather/test-must-gather-logs.robot +++ b/ods_ci/tests/Tests/0100__platform/0103__must_gather/test-must-gather-logs.robot @@ -16,10 +16,10 @@ Verify that the must-gather image provides RHODS logs and info ... MustGather ... ExcludeOnODH ... ExcludeOnDisconnected - Get must-gather Logs - Verify logs for ${APPLICATIONS_NAMESPACE} + Get Must-Gather Logs + Verify Logs For ${APPLICATIONS_NAMESPACE} IF "${PRODUCT}" == "RHODS" Verify Logs For ${OPERATOR_NAMESPACE} - Run Keyword If RHODS Is Managed Verify logs for ${MONITORING_NAMESPACE} + Run Keyword If RHODS Is Managed Verify Logs For ${MONITORING_NAMESPACE} END [Teardown] Cleanup must-gather Logs diff --git a/ods_ci/tests/Tests/0200__rhoai_upgrade/0201__pre_upgrade.robot b/ods_ci/tests/Tests/0200__rhoai_upgrade/0201__pre_upgrade.robot index a409ba2c1..928e8ab40 100644 --- a/ods_ci/tests/Tests/0200__rhoai_upgrade/0201__pre_upgrade.robot +++ b/ods_ci/tests/Tests/0200__rhoai_upgrade/0201__pre_upgrade.robot @@ -1,197 +1,254 @@ *** Settings *** -Documentation Test Suite for Upgrade testing, to be run before the upgrade -Library OpenShiftLibrary -Resource ../../Resources/RHOSi.resource -Resource ../../Resources/ODS.robot -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboard.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboardResources.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHModelServing.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/DataConnections.resource -Resource ../../Resources/Page/ODH/JupyterHub/HighAvailability.robot -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/Projects.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/ModelServer.resource -Resource ../../Resources/Page/ODH/AiApps/Anaconda.resource -Resource ../../Resources/Page/LoginPage.robot -Resource ../../Resources/Page/OCPLogin/OCPLogin.robot -Resource ../../Resources/Common.robot -Resource ../../Resources/Page/OCPDashboard/Pods/Pods.robot -Resource ../../Resources/Page/OCPDashboard/Builds/Builds.robot -Resource ../../Resources/Page/HybridCloudConsole/OCM.robot -Resource ../../Resources/CLI/ModelServing/modelmesh.resource -Resource ../../Resources/CLI/DataSciencePipelines/DataSciencePipelinesUpgradeTesting.resource -Resource 
../../Resources/Page/DistributedWorkloads/DistributedWorkloads.resource -Resource ../../Resources/Page/DistributedWorkloads/WorkloadMetricsUI.resource -Resource ../../Resources/Page/ModelRegistry/ModelRegistry.resource -Suite Setup Dashboard Suite Setup -Suite Teardown RHOSi Teardown -Test Tags PreUpgrade +Documentation Test Suite for Upgrade testing, to be run before the upgrade + +Library OpenShiftLibrary +Resource ../../Resources/RHOSi.resource +Resource ../../Resources/ODS.robot +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboard.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboardResources.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHModelServing.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/DataConnections.resource +Resource ../../Resources/Page/ODH/JupyterHub/HighAvailability.robot +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/Projects.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/ModelServer.resource +Resource ../../Resources/Page/ODH/AiApps/Anaconda.resource +Resource ../../Resources/Page/LoginPage.robot +Resource ../../Resources/Page/OCPLogin/OCPLogin.robot +Resource ../../Resources/Common.robot +Resource ../../Resources/Page/OCPDashboard/Pods/Pods.robot +Resource ../../Resources/Page/OCPDashboard/Builds/Builds.robot +Resource ../../Resources/Page/HybridCloudConsole/OCM.robot +Resource ../../Resources/CLI/ModelServing/modelmesh.resource +Resource ../../Resources/CLI/DataSciencePipelines/DataSciencePipelinesUpgradeTesting.resource +Resource ../../Resources/Page/DistributedWorkloads/DistributedWorkloads.resource +Resource ../../Resources/Page/DistributedWorkloads/WorkloadMetricsUI.resource +Resource ../../Resources/Page/ModelRegistry/ModelRegistry.resource + +Suite Setup Dashboard Suite Setup +Suite Teardown RHOSi Teardown + +Test Tags PreUpgrade *** Variables *** -${CUSTOM_CULLER_TIMEOUT} 60000 -${S_SIZE} 25 -${DW_PROJECT_CREATED}= False +${CUSTOM_CULLER_TIMEOUT} 60000 +${S_SIZE} 25 +${DW_PROJECT_CREATED} False *** Test Cases *** Set PVC Size Via UI [Documentation] Sets a Pod toleration via the admin UI - [Tags] Upgrade - [Setup] Begin Web Test - Set PVC Value In RHODS Dashboard ${S_SIZE} - [Teardown] Dashboard Test Teardown + [Tags] Upgrade + [Setup] Begin Web Test + Set PVC Value In RHODS Dashboard ${S_SIZE} + [Teardown] Dashboard Test Teardown Set Culler Timeout - [Documentation] Sets a culler timeout via the admin UI - [Tags] Upgrade - [Setup] Begin Web Test - Modify Notebook Culler Timeout ${CUSTOM_CULLER_TIMEOUT} - [Teardown] Dashboard Test Teardown + [Documentation] Sets a culler timeout via the admin UI + [Tags] Upgrade + [Setup] Begin Web Test + Modify Notebook Culler Timeout ${CUSTOM_CULLER_TIMEOUT} + [Teardown] Dashboard Test Teardown Setting Pod Toleration Via UI [Documentation] Sets a Pod toleration via the admin UI - [Tags] Upgrade - [Setup] Begin Web Test - Menu.Navigate To Page Settings Cluster settings - Wait Until Page Contains Notebook pod tolerations - Set Pod Toleration Via UI TestToleration + [Tags] Upgrade + [Setup] Begin Web Test + Menu.Navigate To Page Settings Cluster settings + Wait Until Page Contains Notebook pod tolerations + Set Pod Toleration Via UI TestToleration Disable "Usage Data Collection" - [Teardown] Dashboard Test Teardown + [Teardown] Dashboard Test Teardown Verify RHODS Accept Multiple Admin Groups And CRD Gets Updates [Documentation] Verify that users can set multiple admin groups and - ... 
check OdhDashboardConfig CRD gets updated according to Admin UI
-    [Tags]    Upgrade
-    [Setup]    Begin Web Test
-    Launch Dashboard And Check User Management Option Is Available For The User    ${TEST_USER.USERNAME}    ${TEST_USER.PASSWORD}    ${TEST_USER.AUTH_TYPE}    #robocop: disable
+    ...    check OdhDashboardConfig CRD gets updated according to Admin UI
+    [Tags]    Upgrade
+    [Setup]    Begin Web Test
+    # robocop: disable
+    Launch Dashboard And Check User Management Option Is Available For The User
+    ...    ${TEST_USER.USERNAME}
+    ...    ${TEST_USER.PASSWORD}
+    ...    ${TEST_USER.AUTH_TYPE}
     Clear User Management Settings
-    Add OpenShift Groups To Data Science Administrators    rhods-admins    rhods-users
-    Add OpenShift Groups To Data Science User Groups    system:authenticated
+    Add OpenShift Groups To Data Science Administrators    rhods-admins    rhods-users
+    Add OpenShift Groups To Data Science User Groups    system:authenticated
     Save Changes In User Management Setting
-    [Teardown]    Dashboard Test Teardown
+    [Teardown]    Dashboard Test Teardown

 Verify Custom Image Can Be Added
-    [Documentation]    Create Custome notebook using Cli
-    [Tags]    Upgrade
-    Oc Apply    kind=ImageStream    src=tests/Tests/0200__rhoai_upgrade/custome_image.yaml
+    [Documentation]    Create Custom notebook using CLI
+    [Tags]    Upgrade
+    Oc Apply    kind=ImageStream    src=tests/Tests/0200__rhoai_upgrade/custome_image.yaml

 Verify User Can Disable The Runtime
-    [Documentation]    Disable the Serving runtime using Cli
-    [Tags]    Upgrade
-    Disable Model Serving Runtime Using CLI    namespace=redhat-ods-applications
+    [Documentation]    Disable the Serving runtime using CLI
+    [Tags]    Upgrade
+    Disable Model Serving Runtime Using CLI    namespace=redhat-ods-applications

 Verify Model Can Be Deployed Via UI For Upgrade
-    [Tags]    Upgrade
-    [Setup]    Begin Web Test
-    ${PRJ_TITLE}=    Set Variable    model-serving-upgrade
-    ${PRJ_DESCRIPTION}=    Set Variable    project used for model serving tests
-    ${MODEL_NAME}=    Set Variable    test-model
-    ${MODEL_CREATED}=    Set Variable    ${FALSE}
-    ${RUNTIME_NAME}=    Set Variable    Model Serving Test
-    ${INFERENCE_INPUT}=    Set Variable    @tests/Resources/Files/modelmesh-mnist-input.json
-    ${INFERENCE_INPUT_OPENVINO}=    Set Variable    @tests/Resources/Files/openvino-example-input.json
-    ${EXPECTED_INFERENCE_OUTPUT}=    Set Variable    {"model_name":"test-model__isvc-83d6fab7bd","model_version":"1","outputs":[{"name":"Plus214_Output_0","datatype":"FP32","shape":[1,10],"data":[-8.233053,-7.7497034,-3.4236815,12.3630295,-12.079103,17.266596,-10.570976,0.7130762,3.321715,1.3621228]}]}
-    ${EXPECTED_INFERENCE_OUTPUT_OPENVINO}=    Set Variable    {"model_name":"test-model__isvc-8655dc7979","model_version":"1","outputs":[{"name":"Func/StatefulPartitionedCall/output/_13:0","datatype":"FP32","shape":[1,1],"data":[0.99999994]}]}
-    ${runtime_pod_name} =    Replace String Using Regexp    string=${RUNTIME_NAME}    pattern=\\s    replace_with=-
-    ${runtime_pod_name} =    Convert To Lower Case    ${runtime_pod_name}
+    # robocop: off=too-long-test-case
+    # robocop: off=too-many-calls-in-test-case
+    [Documentation]    Verify Model Can Be Deployed Via UI For Upgrade
+    [Tags]    Upgrade
+    [Setup]    Begin Web Test
+    ${PRJ_TITLE}=    Set Variable    model-serving-upgrade
+    ${PRJ_DESCRIPTION}=    Set Variable    project used for model serving tests
+    ${MODEL_NAME}=    Set Variable    test-model
+    ${MODEL_CREATED}=    Set Variable    ${FALSE}
+    ${RUNTIME_NAME}=    Set Variable    Model Serving Test
+    ${INFERENCE_INPUT_OPENVINO}=    Set Variable
+    ...    @tests/Resources/Files/openvino-example-input.json
+    ${EXPECTED_INFERENCE_OUTPUT_OPENVINO}=    Set Variable
+    ...
{"model_name":"test-model__isvc-8655dc7979","model_version":"1","outputs":[{"name":"Func/StatefulPartitionedCall/output/_13:0","datatype":"FP32","shape":[1,1],"data":[0.99999994]}]} # robocop: disable:line-too-long + ${runtime_pod_name}= Replace String Using Regexp + ... string=${RUNTIME_NAME} + ... pattern=\\s + ... replace_with=- + ${runtime_pod_name}= Convert To Lower Case ${runtime_pod_name} Fetch CA Certificate If RHODS Is Self-Managed Clean All Models Of Current User Open Data Science Projects Home Page - Wait For RHODS Dashboard To Load wait_for_cards=${FALSE} expected_page=Data Science Projects - Create Data Science Project title=${PRJ_TITLE} description=${PRJ_DESCRIPTION} - Create S3 Data Connection project_title=${PRJ_TITLE} dc_name=model-serving-connection - ... aws_access_key=${S3.AWS_ACCESS_KEY_ID} aws_secret_access=${S3.AWS_SECRET_ACCESS_KEY} - ... aws_bucket_name=ods-ci-s3 - Create Model Server token=${FALSE} server_name=${RUNTIME_NAME} - Serve Model project_name=${PRJ_TITLE} model_name=${MODEL_NAME} framework=openvino_ir existing_data_connection=${TRUE} - ... data_connection_name=model-serving-connection model_path=openvino-example-model - Run Keyword And Continue On Failure Wait Until Keyword Succeeds - ... 5 min 10 sec Verify Openvino Deployment runtime_name=${runtime_pod_name} - Run Keyword And Continue On Failure Wait Until Keyword Succeeds 5 min 10 sec Verify Serving Service - Verify Model Status ${MODEL_NAME} success - Set Suite Variable ${MODEL_CREATED} ${TRUE} - Run Keyword And Continue On Failure Verify Model Inference ${MODEL_NAME} ${INFERENCE_INPUT_OPENVINO} ${EXPECTED_INFERENCE_OUTPUT_OPENVINO} token_auth=${FALSE} - Remove File openshift_ca.crt - [Teardown] Run Keywords Dashboard Test Teardown - ... AND - ... Run Keyword If Test Failed Get Events And Pod Logs namespace=${PRJ_TITLE} - ... label_selector=name=modelmesh-serving-${runtime_pod_name} + Wait For RHODS Dashboard To Load + ... wait_for_cards=${FALSE} + ... expected_page=Data Science Projects + Create Data Science Project title=${PRJ_TITLE} description=${PRJ_DESCRIPTION} + Create S3 Data Connection + ... project_title=${PRJ_TITLE} + ... dc_name=model-serving-connection + ... aws_access_key=${S3.AWS_ACCESS_KEY_ID} + ... aws_secret_access=${S3.AWS_SECRET_ACCESS_KEY} + ... aws_bucket_name=ods-ci-s3 + Create Model Server token=${FALSE} server_name=${RUNTIME_NAME} + Serve Model + ... project_name=${PRJ_TITLE} + ... model_name=${MODEL_NAME} + ... framework=openvino_ir + ... existing_data_connection=${TRUE} + ... data_connection_name=model-serving-connection + ... model_path=openvino-example-model + Run Keyword And Continue On Failure + ... Wait Until Keyword Succeeds + ... 5 min + ... 10 sec + ... Verify Openvino Deployment + ... runtime_name=${runtime_pod_name} + Run Keyword And Continue On Failure + ... Wait Until Keyword Succeeds + ... 5 min + ... 10 sec + ... Verify Serving Service + Verify Model Status ${MODEL_NAME} success + Set Suite Variable ${MODEL_CREATED} ${TRUE} # robocop: disable:replace-set-variable-with-var + Run Keyword And Continue On Failure + ... Verify Model Inference + ... ${MODEL_NAME} + ... ${INFERENCE_INPUT_OPENVINO} + ... ${EXPECTED_INFERENCE_OUTPUT_OPENVINO} + ... token_auth=${FALSE} + Remove File openshift_ca.crt + [Teardown] Run Keywords Dashboard Test Teardown + ... AND + ... Run Keyword If Test Failed Get Events And Pod Logs namespace=${PRJ_TITLE} + ... 
label_selector=name=modelmesh-serving-${runtime_pod_name} Verify User Can Deploy Custom Runtime For Upgrade - [Tags] Upgrade - Create Custom Serving Runtime Using Template By CLI tests/Resources/Files/caikit_runtime_template.yaml + [Documentation] Verify User Can Deploy Custom Runtime For Upgrade + [Tags] Upgrade + Create Custom Serving Runtime Using Template By CLI + ... tests/Resources/Files/caikit_runtime_template.yaml Begin Web Test - Menu.Navigate To Page Settings Serving runtimes - Wait Until Page Contains Add serving runtime timeout=15s - Page Should Contain Element //tr[@id='caikit-runtime'] - [Teardown] Dashboard Test Teardown + Menu.Navigate To Page Settings Serving runtimes + Wait Until Page Contains Add serving runtime timeout=15s + Page Should Contain Element //tr[@id='caikit-runtime'] + [Teardown] Dashboard Test Teardown Verify Distributed Workload Metrics Resources By Creating Ray Cluster Workload + # robocop: off=too-long-test-case + # robocop: off=too-many-calls-in-test-case [Documentation] Creates the Ray Cluster and verify resource usage - [Tags] Upgrade - [Setup] Prepare Codeflare-SDK Test Setup - ${PRJ_UPGRADE} Set Variable test-ns-rayupgrade - ${JOB_NAME} Set Variable mnist - Run Codeflare-SDK Test upgrade raycluster_sdk_upgrade_test.py::TestMNISTRayClusterUp 3.11 ${RAY_CUDA_IMAGE_3.11} ${CODEFLARE-SDK-RELEASE-TAG} - Set Library Search Order SeleniumLibrary + [Tags] Upgrade + [Setup] Prepare Codeflare-SDK Test Setup + ${PRJ_UPGRADE}= Set Variable test-ns-rayupgrade + ${JOB_NAME}= Set Variable mnist + Run Codeflare-SDK Test + ... upgrade + ... raycluster_sdk_upgrade_test.py::TestMNISTRayClusterUp + ... 3.11 + ... ${RAY_CUDA_IMAGE_3.11} + ... ${CODEFLARE-SDK-RELEASE-TAG} + Set Library Search Order SeleniumLibrary RHOSi Setup - Launch Dashboard ${TEST_USER.USERNAME} ${TEST_USER.PASSWORD} ${TEST_USER.AUTH_TYPE} - ... ${ODH_DASHBOARD_URL} ${BROWSER.NAME} ${BROWSER.OPTIONS} + Launch Dashboard + ... ${TEST_USER.USERNAME} + ... ${TEST_USER.PASSWORD} + ... ${TEST_USER.AUTH_TYPE} + ... ${ODH_DASHBOARD_URL} + ... ${BROWSER.NAME} + ... ${BROWSER.OPTIONS} Open Distributed Workload Metrics Home Page - Select Distributed Workload Project By Name ${PRJ_UPGRADE} - Set Global Variable ${DW_PROJECT_CREATED} True - Select Refresh Interval 15 seconds - Wait Until Element Is Visible ${DISTRIBUITED_WORKLOAD_RESOURCE_METRICS_TITLE_XP} timeout=20 - Wait Until Element Is Visible xpath=//*[text()="Running"] timeout=30 - - ${cpu_requested} = Get CPU Requested ${PRJ_UPGRADE} local-queue-mnist - ${memory_requested} = Get Memory Requested ${PRJ_UPGRADE} local-queue-mnist RayCluster - Check Requested Resources Chart ${PRJ_UPGRADE} ${cpu_requested} ${memory_requested} - Check Requested Resources ${PRJ_UPGRADE} ${CPU_SHARED_QUOTA} - ... ${MEMEORY_SHARED_QUOTA} ${cpu_requested} ${memory_requested} RayCluster - - Check Distributed Workload Resource Metrics Status ${JOB_NAME} Running - Check Distributed Worklaod Status Overview ${JOB_NAME} Running - ... All pods were ready or succeeded since the workload admission + Select Distributed Workload Project By Name ${PRJ_UPGRADE} + Set Global Variable ${DW_PROJECT_CREATED} True # robocop: disable:replace-set-variable-with-var + Select Refresh Interval 15 seconds + Wait Until Element Is Visible + ... ${DISTRIBUITED_WORKLOAD_RESOURCE_METRICS_TITLE_XP} + ... 
timeout=20 + Wait Until Element Is Visible xpath=//*[text()="Running"] timeout=30 + + ${cpu_requested}= Get CPU Requested ${PRJ_UPGRADE} local-queue-mnist + ${memory_requested}= Get Memory Requested ${PRJ_UPGRADE} local-queue-mnist RayCluster + Check Requested Resources Chart ${PRJ_UPGRADE} ${cpu_requested} ${memory_requested} + Check Requested Resources + ... ${PRJ_UPGRADE} + ... ${CPU_SHARED_QUOTA} + ... ${MEMEORY_SHARED_QUOTA} + ... ${cpu_requested} + ... ${memory_requested} + ... RayCluster + + Check Distributed Workload Resource Metrics Status ${JOB_NAME} Running + Check Distributed Worklaod Status Overview ${JOB_NAME} Running + ... All pods were ready or succeeded since the workload admission Click Button ${PROJECT_METRICS_TAB_XP} - Check Distributed Workload Resource Metrics Chart ${PRJ_UPGRADE} ${cpu_requested} - ... ${memory_requested} RayCluster ${JOB_NAME} + Check Distributed Workload Resource Metrics Chart ${PRJ_UPGRADE} ${cpu_requested} + ... ${memory_requested} RayCluster ${JOB_NAME} - [Teardown] Run Keywords Cleanup Codeflare-SDK Setup AND - ... Run Keyword If Test Failed Codeflare Upgrade Tests Teardown ${PRJ_UPGRADE} ${DW_PROJECT_CREATED} + [Teardown] Run Keywords Cleanup Codeflare-SDK Setup AND + ... Run Keyword If Test Failed Codeflare Upgrade Tests Teardown ${PRJ_UPGRADE} ${DW_PROJECT_CREATED} # robocop: disable:line-too-long Run Training Operator ODH Setup PyTorchJob Test Use Case [Documentation] Run Training Operator ODH Setup PyTorchJob Test Use Case - [Tags] Upgrade - [Setup] Prepare Training Operator E2E Upgrade Test Suite - Run Training Operator ODH Upgrade Test TestSetupPytorchjob - [Teardown] Teardown Training Operator E2E Upgrade Test Suite + [Tags] Upgrade + [Setup] Prepare Training Operator E2E Upgrade Test Suite + Run Training Operator ODH Upgrade Test TestSetupPytorchjob + [Teardown] Teardown Training Operator E2E Upgrade Test Suite Run Training Operator ODH Setup Sleep PyTorchJob Test Use Case [Documentation] Setup PyTorchJob which is kept running for 24 hours - [Tags] Upgrade - [Setup] Prepare Training Operator E2E Upgrade Test Suite - Run Training Operator ODH Upgrade Test TestSetupSleepPytorchjob - [Teardown] Teardown Training Operator E2E Upgrade Test Suite + [Tags] Upgrade + [Setup] Prepare Training Operator E2E Upgrade Test Suite + Run Training Operator ODH Upgrade Test TestSetupSleepPytorchjob + [Teardown] Teardown Training Operator E2E Upgrade Test Suite Data Science Pipelines Pre Upgrade Configuration [Documentation] Creates project dsp-test-upgrade and configures the pipeline resources testing upgrade - [Tags] Upgrade DataSciencePipelines-Backend + [Tags] Upgrade DataSciencePipelines-Backend DataSciencePipelinesUpgradeTesting.Setup Environment For Upgrade Testing Model Registry Pre Upgrade Set Up [Documentation] Creates a Model Registry instance and registers a model/version - [Tags] Upgrade ModelRegistryUpgrade + [Tags] Upgrade ModelRegistryUpgrade Model Registry Pre Upgrade Scenario *** Keywords *** Dashboard Suite Setup - [Documentation] Basic suite setup + [Documentation] Basic suite setup Set Library Search Order SeleniumLibrary RHOSi Setup Dashboard Test Teardown - [Documentation] Basic suite teardown + [Documentation] Basic suite teardown Close All Browsers diff --git a/ods_ci/tests/Tests/0200__rhoai_upgrade/0202__during_upgrade.robot b/ods_ci/tests/Tests/0200__rhoai_upgrade/0202__during_upgrade.robot index b34d1e897..740cc93f2 100644 --- a/ods_ci/tests/Tests/0200__rhoai_upgrade/0202__during_upgrade.robot +++ 
b/ods_ci/tests/Tests/0200__rhoai_upgrade/0202__during_upgrade.robot @@ -1,5 +1,6 @@ *** Settings *** Documentation Test Suite for Upgrade testing,to be run during the upgrade + Resource ../../Resources/ODS.robot Resource ../../Resources/Common.robot Resource ../../Resources/Page/ODH/JupyterHub/JupyterHubSpawner.robot @@ -9,107 +10,146 @@ Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboardSettings.r Resource ../../Resources/Page/ODH/JupyterHub/ODHJupyterhub.resource Library DebugLibrary Library JupyterLibrary + Test Tags DuringUpgrade *** Variables *** -${CODE} while True: import time ; time.sleep(10); print ("Hello") +${CODE} while True: import time ; time.sleep(10); print ("Hello") *** Test Cases *** Long Running Jupyter Notebook [Documentation] Launch a long running notebook before the upgrade - [Tags] Upgrade + [Tags] Upgrade Launch Notebook - Add And Run JupyterLab Code Cell In Active Notebook ${CODE} - ${return_code} ${timestamp} Run And Return Rc And Output oc get pod -n ${NOTEBOOKS_NAMESPACE} jupyter-nb-ldap-2dadmin2-0 --no-headers --output='custom-columns=TIMESTAMP:.metadata.creationTimestamp' #robocop:disable - Should Be Equal As Integers ${return_code} 0 - Set Global Variable ${timestamp} #robocop: disable + Add And Run JupyterLab Code Cell In Active Notebook ${CODE} + # robocop:disable + ${return_code} ${timestamp} = Run And Return Rc And Output + ... oc get pod -n ${NOTEBOOKS_NAMESPACE} jupyter-nb-ldap-2dadmin2-0 --no-headers --output='custom-columns=TIMESTAMP:.metadata.creationTimestamp' + Should Be Equal As Integers ${return_code} 0 + Set Global Variable ${timestamp} # robocop: disable Close Browser Upgrade RHODS [Documentation] Approve the install plan for the upgrade and make sure that upgrade has completed - [Tags] ODS-1766 - ... Upgrade - ${initial_version} = Get RHODS Version - ${initial_creation_date} = Get Operator Pod Creation Date - ${return_code} ${output} Run And Return Rc And Output oc patch installplan $(oc get installplans -n ${OPERATOR_NAMESPACE} | grep -v NAME | awk '{print $1}') -n ${OPERATOR_NAMESPACE} --type='json' -p '[{"op": "replace", "path": "/spec/approved", "value": true}]' #robocop:disable - Should Be Equal As Integers ${return_code} 0 msg=Error while upgrading RHODS - Sleep 30s reason=wait for thirty seconds until old CSV is removed and new one is ready - RHODS Version Should Be Greater Than ${initial_version} - Operator Pod Creation Date Should Be Updated ${initial_creation_date} - OpenShiftLibrary.Wait For Pods Status namespace=${OPERATOR_NAMESPACE} timeout=300 + [Tags] ODS-1766 Upgrade + ${initial_version} = Get RHODS Version + ${initial_creation_date} = Get Operator Pod Creation Date + # robocop:disable + ${return_code} ${output} = Run And Return Rc And Output + ... oc patch installplan $(oc get installplans -n ${OPERATOR_NAMESPACE} | grep -v NAME | awk '{print $1}') -n ${OPERATOR_NAMESPACE} --type='json' -p '[{"op": "replace", "path": "/spec/approved", "value": true}]' + Should Be Equal As Integers + ... ${return_code} + ... 0 + ... msg=Error while upgrading RHODS + Sleep + ... 30s + ... 
reason=wait for thirty seconds until old CSV is removed and new one is ready + RHODS Version Should Be Greater Than ${initial_version} + Operator Pod Creation Date Should Be Updated ${initial_creation_date} + OpenShiftLibrary.Wait For Pods Status namespace=${OPERATOR_NAMESPACE} timeout=300 TensorFlow Image Test - [Documentation] Run basic tensorflow notebook during upgrade - [Tags] Upgrade - Launch Notebook tensorflow ${TEST_USER.USERNAME} ${TEST_USER.PASSWORD} ${TEST_USER.AUTH_TYPE} - [Teardown] Upgrade Test Teardown + [Documentation] Run basic tensorflow notebook during upgrade + [Tags] Upgrade + Launch Notebook + ... tensorflow + ... ${TEST_USER.USERNAME} + ... ${TEST_USER.PASSWORD} + ... ${TEST_USER.AUTH_TYPE} + [Teardown] Upgrade Test Teardown PyTorch Image Workload Test - [Documentation] Run basic pytorch notebook during upgrade - [Tags] Upgrade - Launch Notebook pytorch ${TEST_USER.USERNAME} ${TEST_USER.PASSWORD} ${TEST_USER.AUTH_TYPE} - Run Repo And Clean https://github.com/lugi0/notebook-benchmarks notebook-benchmarks/pytorch/PyTorch-MNIST-Minimal.ipynb + [Documentation] Run basic pytorch notebook during upgrade + [Tags] Upgrade + Launch Notebook + ... pytorch + ... ${TEST_USER.USERNAME} + ... ${TEST_USER.PASSWORD} + ... ${TEST_USER.AUTH_TYPE} + Run Repo And Clean + ... https://github.com/lugi0/notebook-benchmarks + ... notebook-benchmarks/pytorch/PyTorch-MNIST-Minimal.ipynb Capture Page Screenshot JupyterLab Code Cell Error Output Should Not Be Visible - [Teardown] Upgrade Test Teardown + [Teardown] Upgrade Test Teardown *** Keywords *** Launch Notebook - [Documentation] Launch notebook for the suite - [Arguments] ${notbook_image}=minimal-notebook ${username}=${TEST_USER2.USERNAME} ${password}=${TEST_USER2.PASSWORD} ${auth_type}=${TEST_USER2.AUTH_TYPE} #robocop: disable - Begin Web Test username=${username} password=${password} auth_type=${auth_type} - Login To RHODS Dashboard ${username} ${password} ${auth_type} + [Documentation] Launch notebook for the suite + [Arguments] ${notbook_image}=minimal-notebook + ... ${username}=${TEST_USER2.USERNAME} + ... ${password}=${TEST_USER2.PASSWORD} + ... ${auth_type}=${TEST_USER2.AUTH_TYPE} + # robocop: disable + Begin Web Test username=${username} password=${password} auth_type=${auth_type} + Login To RHODS Dashboard ${username} ${password} ${auth_type} Wait For RHODS Dashboard To Load Launch Jupyter From RHODS Dashboard Link - Login To Jupyterhub ${username} ${password} ${auth_type} - ${authorization_required} Is Service Account Authorization Required + Login To Jupyterhub ${username} ${password} ${auth_type} + ${authorization_required} = Is Service Account Authorization Required IF ${authorization_required} Authorize Jupyterhub Service Account Fix Spawner Status - Spawn Notebook With Arguments image=${notbook_image} username=${username} password=${password} auth_type=${auth_type} #robocop: disable + # robocop: disable + Spawn Notebook With Arguments + ... image=${notbook_image} + ... username=${username} + ... password=${password} + ... 
auth_type=${auth_type} Upgrade Test Teardown + # robocop: off=too-many-calls-in-keyword + [Documentation] Upgrade Test Teardown End Web Test Skip If RHODS Is Self-Managed ${expression} = Set Variable rhods_aggregate_availability&step=1 ${resp} = Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} Log rhods_aggregate_availability: ${resp.json()["data"]["result"][0]["value"][-1]} - @{list_values} = Create List 1 - Run Keyword And Warn On Failure Should Contain ${list_values} ${resp.json()["data"]["result"][0]["value"][-1]} - ${expression} = Set Variable rhods_aggregate_availability{name="rhods-dashboard"}&step=1 + @{list_values} = Create List 1 # robocop: disable:replace-set-variable-with-var + Run Keyword And Warn On Failure + ... Should Contain + ... ${list_values} + ... ${resp.json()["data"]["result"][0]["value"][-1]} + ${expression} = Set Variable rhods_aggregate_availability{name="rhods-dashboard"}&step=1 ${resp} = Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} Log rhods_aggregate_availability: ${resp.json()["data"]["result"][0]["value"][-1]} - @{list_values} = Create List 1 - Run Keyword And Warn On Failure Should Contain ${list_values} ${resp.json()["data"]["result"][0]["value"][-1]} - ${expression} = Set Variable rhods_aggregate_availability{name="notebook-spawner"}&step=1 + @{list_values} = Create List 1 # robocop: disable:replace-set-variable-with-var + Run Keyword And Warn On Failure + ... Should Contain + ... ${list_values} + ... ${resp.json()["data"]["result"][0]["value"][-1]} + ${expression} = Set Variable rhods_aggregate_availability{name="notebook-spawner"}&step=1 ${resp} = Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} Log rhods_aggregate_availability: ${resp.json()["data"]["result"][0]["value"][-1]} - @{list_values} = Create List 1 - Run Keyword And Warn On Failure Should Contain ${list_values} ${resp.json()["data"]["result"][0]["value"][-1]} + @{list_values} = Create List 1 # robocop: disable:replace-set-variable-with-var + Run Keyword And Warn On Failure + ... Should Contain + ... ${list_values} + ... ${resp.json()["data"]["result"][0]["value"][-1]} RHODS Version Should Be Greater Than [Documentation] Checks if the RHODS version is greater than the given initial version. - ... Fails if the version is not greater. - [Arguments] ${initial_version} - ${ver} = Get RHODS Version - ${ver} = Fetch From Left ${ver} - + ... Fails if the version is not greater. + [Arguments] ${initial_version} + ${ver} = Get RHODS Version + ${ver} = Fetch From Left ${ver} - Should Be True '${ver}' > '${initial_version}' msg=Version wasn't greater than initial one ${initial_version} Get Operator Pod Creation Date [Documentation] Retrieves the creation date of the RHODS operator pod. - ... Returns the creation date as a string. - ... Fails if the command to retrieve the creation date fails. - ${return_code} ${creation_date} = Run And Return Rc And Output - ... oc get pod -n ${OPERATOR_NAMESPACE} -l name=rhods-operator --no-headers -o jsonpath='{.items[0].metadata.creationTimestamp}' - Should Be Equal As Integers ${return_code} 0 msg=Error while getting creation date of the operator pod + ... Returns the creation date as a string. + ... Fails if the command to retrieve the creation date fails. + ${return_code} ${creation_date} = Run And Return Rc And Output + ... 
oc get pod -n ${OPERATOR_NAMESPACE} -l name=rhods-operator --no-headers -o jsonpath='{.items[0].metadata.creationTimestamp}' #robocop: disable:line-too-long + Should Be Equal As Integers ${return_code} 0 msg=Error while getting creation date of the operator pod RETURN ${creation_date} Operator Pod Creation Date Should Be Updated [Documentation] Checks if the operator pod creation date has been updated after the upgrade. - ... Fails if the updated creation date is not more recent than the initial creation date. - [Arguments] ${initial_creation_date} - ${updated_creation_date} = Get Operator Pod Creation Date + ... Fails if the updated creation date is not more recent than the initial creation date. + [Arguments] ${initial_creation_date} + ${updated_creation_date} = Get Operator Pod Creation Date Should Be True '${updated_creation_date}' > '${initial_creation_date}' ... msg=Operator pod creation date was not updated after upgrade diff --git a/ods_ci/tests/Tests/0200__rhoai_upgrade/0203__post_upgrade.robot b/ods_ci/tests/Tests/0200__rhoai_upgrade/0203__post_upgrade.robot index 5682444b6..cfe28b010 100644 --- a/ods_ci/tests/Tests/0200__rhoai_upgrade/0203__post_upgrade.robot +++ b/ods_ci/tests/Tests/0200__rhoai_upgrade/0203__post_upgrade.robot @@ -1,310 +1,360 @@ *** Settings *** Documentation Test Suite for Upgrade testing,to be run after the upgrade -Library OpenShiftLibrary -Resource ../../Resources/RHOSi.resource -Resource ../../Resources/ODS.robot -Resource ../../Resources/OCP.resource -Resource ../../../tasks/Resources/RHODS_OLM/install/oc_install.robot -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboard.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboardResources.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHModelServing.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/DataConnections.resource -Resource ../../Resources/Page/ODH/JupyterHub/HighAvailability.robot -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/Projects.resource -Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/ModelServer.resource -Resource ../../Resources/Page/ODH/AiApps/Anaconda.resource -Resource ../../Resources/Page/LoginPage.robot -Resource ../../Resources/Page/OCPLogin/OCPLogin.robot -Resource ../../Resources/Common.robot -Resource ../../Resources/Page/OCPDashboard/Pods/Pods.robot -Resource ../../Resources/Page/OCPDashboard/Builds/Builds.robot -Resource ../../Resources/Page/HybridCloudConsole/OCM.robot -Resource ../../Resources/Page/DistributedWorkloads/DistributedWorkloads.resource -Resource ../../Resources/Page/DistributedWorkloads/WorkloadMetricsUI.resource -Resource ../../Resources/CLI/MustGather/MustGather.resource -Resource ../../Resources/CLI/DataSciencePipelines/DataSciencePipelinesUpgradeTesting.resource -Resource ../../Resources/Page/ModelRegistry/ModelRegistry.resource -Suite Setup Upgrade Suite Setup -Test Tags PostUpgrade + +Library OpenShiftLibrary +Resource ../../Resources/RHOSi.resource +Resource ../../Resources/ODS.robot +Resource ../../Resources/OCP.resource +Resource ../../../tasks/Resources/RHODS_OLM/install/oc_install.robot +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboard.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDashboardResources.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHModelServing.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/DataConnections.resource +Resource 
../../Resources/Page/ODH/JupyterHub/HighAvailability.robot +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/Projects.resource +Resource ../../Resources/Page/ODH/ODHDashboard/ODHDataScienceProject/ModelServer.resource +Resource ../../Resources/Page/ODH/AiApps/Anaconda.resource +Resource ../../Resources/Page/LoginPage.robot +Resource ../../Resources/Page/OCPLogin/OCPLogin.robot +Resource ../../Resources/Common.robot +Resource ../../Resources/Page/OCPDashboard/Pods/Pods.robot +Resource ../../Resources/Page/OCPDashboard/Builds/Builds.robot +Resource ../../Resources/Page/HybridCloudConsole/OCM.robot +Resource ../../Resources/Page/DistributedWorkloads/DistributedWorkloads.resource +Resource ../../Resources/Page/DistributedWorkloads/WorkloadMetricsUI.resource +Resource ../../Resources/CLI/MustGather/MustGather.resource +Resource ../../Resources/CLI/DataSciencePipelines/DataSciencePipelinesUpgradeTesting.resource +Resource ../../Resources/Page/ModelRegistry/ModelRegistry.resource + +Suite Setup Upgrade Suite Setup + +Test Tags PostUpgrade *** Variables *** -${S_SIZE} 25 -${DW_PROJECT_CREATED}= False +${S_SIZE} 25 +${DW_PROJECT_CREATED} False *** Test Cases *** Verify PVC Size [Documentation] Verify PVC Size after the upgrade - [Tags] Upgrade + [Tags] Upgrade Get Dashboard Config Data - ${size} Set Variable ${payload[0]['spec']['notebookController']['pvcSize']}[:-2] - Should Be Equal As Strings '${size}' '${S_SIZE}' + ${size} Set Variable ${payload[0]['spec']['notebookController']['pvcSize']}[:-2] + Should Be Equal As Strings '${size}' '${S_SIZE}' Verify Pod Toleration [Documentation] Verify Pod toleration after the upgrade - [Tags] Upgrade - ${enable} Set Variable ${payload[0]['spec']['notebookController']['notebookTolerationSettings']['enabled']} - Should Be Equal As Strings '${enable}' 'True' + [Tags] Upgrade + ${enable} Set Variable + ... ${payload[0]['spec']['notebookController']['notebookTolerationSettings']['enabled']} + Should Be Equal As Strings '${enable}' 'True' Verify RHODS User Groups [Documentation] Verify User Configuration after the upgrade - [Tags] Upgrade - ${admin} Set Variable ${payload[0]['spec']['groupsConfig']['adminGroups']} - ${user} Set Variable ${payload[0]['spec']['groupsConfig']['allowedGroups']} - Should Be Equal As Strings '${admin}' 'rhods-admins,rhods-users' - Should Be Equal As Strings '${user}' 'system:authenticated' - [Teardown] Set Default Users + [Tags] Upgrade + ${admin} Set Variable ${payload[0]['spec']['groupsConfig']['adminGroups']} + ${user} Set Variable ${payload[0]['spec']['groupsConfig']['allowedGroups']} + Should Be Equal As Strings '${admin}' 'rhods-admins,rhods-users' + Should Be Equal As Strings '${user}' 'system:authenticated' + [Teardown] Set Default Users Verify Culler is Enabled [Documentation] Verify Culler Configuration after the upgrade - [Tags] Upgrade - ${status} Check If ConfigMap Exists ${APPLICATIONS_NAMESPACE} notebook-controller-culler-config + [Tags] Upgrade + ${status} Check If ConfigMap Exists + ... ${APPLICATIONS_NAMESPACE} + ... 
notebook-controller-culler-config
     IF    '${status}' != 'PASS'
-        Fail    msg=Culler has been diabled after the upgrade
+        Fail    msg=Culler has been disabled after the upgrade
     END

 Verify Notebook Has Not Restarted
     [Documentation]    Verify Notbook pod has not restarted after the upgrade
-    [Tags]    Upgrade
-    ${return_code}    ${new_timestamp}    Run And Return Rc And Output    oc get pod -n ${NOTEBOOKS_NAMESPACE} jupyter-nb-ldap-2dadmin2-0 --no-headers --output='custom-columns=TIMESTAMP:.metadata.creationTimestamp'    #robocop:disable
-    Should Be Equal As Integers    ${return_code}    0
-    Should Be Equal    ${timestamp}    ${new_timestamp}    msg=Running notebook pod has restarted
+    [Tags]    Upgrade
+    # robocop:disable
+    ${return_code}    ${new_timestamp}    Run And Return Rc And Output
+    ...    oc get pod -n ${NOTEBOOKS_NAMESPACE} jupyter-nb-ldap-2dadmin2-0 --no-headers --output='custom-columns=TIMESTAMP:.metadata.creationTimestamp'
+    Should Be Equal As Integers    ${return_code}    0
+    Should Be Equal    ${timestamp}    ${new_timestamp}    msg=Running notebook pod has restarted

 Verify Custom Image Is Present
-    [Tags]    Upgrade
-    [Documentation]    Verify Custom Noteboook is not deleted after the upgrade
-    ${status}    Run Keyword And Return Status    Oc Get    kind=ImageStream    namespace=${APPLICATIONS_NAMESPACE}
-    ...    field_selector=metadata.name==byon-upgrade
-    IF    not ${status}    Fail    Notebook image is deleted after the upgrade
-    [Teardown]    Delete OOTB Image
+    [Documentation]    Verify Custom Notebook is not deleted after the upgrade
+    [Tags]    Upgrade
+    ${status}    Run Keyword And Return Status
+    ...    Oc Get
+    ...    kind=ImageStream
+    ...    namespace=${APPLICATIONS_NAMESPACE}
+    ...    field_selector=metadata.name==byon-upgrade
+    IF    not ${status}    Fail    Notebook image is deleted after the upgrade
+    [Teardown]    Delete OOTB Image

 Verify Disable Runtime Is Present
-    [Documentation]    Disable the Serving runtime using Cli
-    [Tags]    Upgrade
-    ${rn}    Set Variable    ${payload[0]['spec']['templateDisablement']}
-    List Should Contain Value    ${rn}    ovms-gpu
-    [Teardown]    Enable Model Serving Runtime Using CLI    namespace=redhat-ods-applications
+    [Documentation]    Disable the Serving runtime using CLI
+    [Tags]    Upgrade
+    ${rn}    Set Variable    ${payload[0]['spec']['templateDisablement']}
+    List Should Contain Value    ${rn}    ovms-gpu
+    [Teardown]    Enable Model Serving Runtime Using CLI    namespace=redhat-ods-applications

 Reset PVC Size Via UI
     [Documentation]    Sets a Pod toleration via the admin UI
-    [Tags]    Upgrade
-    [Setup]    Begin Web Test
-    Set PVC Value In RHODS Dashboard    20
-    [Teardown]    Dashboard Test Teardown
+    [Tags]    Upgrade
+    [Setup]    Begin Web Test
+    Set PVC Value In RHODS Dashboard    20
+    [Teardown]    Dashboard Test Teardown

 Reset Culler Timeout
     [Documentation]    Sets a culler timeout via the admin UI
-    [Tags]    Upgrade
-    [Setup]    Begin Web Test
+    [Tags]    Upgrade
+    [Setup]    Begin Web Test
     Disable Notebook Culler
-    [Teardown]    Dashboard Test Teardown
+    [Teardown]    Dashboard Test Teardown

 Resetting Pod Toleration Via UI
     [Documentation]    Sets a Pod toleration via the admin UI
-    [Tags]    Upgrade
-    [Setup]    Begin Web Test
-    Menu.Navigate To Page    Settings    Cluster settings
-    Wait Until Page Contains    Notebook pod tolerations
+    [Tags]    Upgrade
+    [Setup]    Begin Web Test
+    Menu.Navigate To Page    Settings    Cluster settings
+    Wait Until Page Contains    Notebook pod tolerations
     Disable Pod Toleration Via UI
     Enable "Usage Data Collection"
-    IF    ${is_data_collection_enabled}==True
-        Fail    msg=Usage data colletion is enbaled after the upgrade
+    IF    ${is_data_collection_enabled}
+        Fail    msg=Usage data collection is enabled after the upgrade
     END
-    [Teardown]
Dashboard Test Teardown + [Teardown] Dashboard Test Teardown Verify POD Status [Documentation] Verify all the pods are up and running - [Tags] Upgrade - Wait For Pods Status namespace=${APPLICATIONS_NAMESPACE} timeout=60 - Log Verified ${APPLICATIONS_NAMESPACE} console=yes - Wait For Pods Status namespace=${OPERATOR_NAMESPACE} timeout=60 - Log Verified ${OPERATOR_NAMESPACE} console=yes - Wait For Pods Status namespace=${MONITORING_NAMESPACE} timeout=60 - Log Verified ${MONITORING_NAMESPACE} console=yes - Oc Get kind=Namespace field_selector=metadata.name=${NOTEBOOKS_NAMESPACE} - Log "Verified rhods-notebook" + [Tags] Upgrade + Wait For Pods Status namespace=${APPLICATIONS_NAMESPACE} timeout=60 + Log Verified ${APPLICATIONS_NAMESPACE} console=yes + Wait For Pods Status namespace=${OPERATOR_NAMESPACE} timeout=60 + Log Verified ${OPERATOR_NAMESPACE} console=yes + Wait For Pods Status namespace=${MONITORING_NAMESPACE} timeout=60 + Log Verified ${MONITORING_NAMESPACE} console=yes + Oc Get kind=Namespace field_selector=metadata.name=${NOTEBOOKS_NAMESPACE} + Log "Verified rhods-notebook" Test Inference Post RHODS Upgrade + # robocop: off=too-many-calls-in-test-case + # robocop: off=too-long-test-case [Documentation] Test the inference result after having deployed a model that requires Token Authentication - [Tags] Upgrade - [Setup] Begin Web Test - ${PRJ_TITLE}= Set Variable model-serving-upgrade - ${PRJ_DESCRIPTION}= Set Variable project used for model serving tests - ${MODEL_NAME}= Set Variable test-model - ${MODEL_CREATED}= Set Variable ${FALSE} - ${RUNTIME_NAME}= Set Variable Model Serving Test - ${INFERENCE_INPUT}= Set Variable @tests/Resources/Files/modelmesh-mnist-input.json - ${INFERENCE_INPUT_OPENVINO}= Set Variable @tests/Resources/Files/openvino-example-input.json - ${EXPECTED_INFERENCE_OUTPUT}= Set Variable {"model_name":"test-model__isvc-83d6fab7bd","model_version":"1","outputs":[{"name":"Plus214_Output_0","datatype":"FP32","shape":[1,10],"data":[-8.233053,-7.7497034,-3.4236815,12.3630295,-12.079103,17.266596,-10.570976,0.7130762,3.321715,1.3621228]}]} - ${EXPECTED_INFERENCE_OUTPUT_OPENVINO}= Set Variable {"model_name":"test-model__isvc-8655dc7979","model_version":"1","outputs":[{"name":"Func/StatefulPartitionedCall/output/_13:0","datatype":"FP32","shape":[1,1],"data":[0.99999994]}]} + [Tags] Upgrade + [Setup] Begin Web Test + ${PRJ_TITLE} Set Variable model-serving-upgrade + ${PRJ_DESCRIPTION} Set Variable project used for model serving tests # robocop: off=unused-variable # robocop: disable:line-too-long + ${MODEL_NAME} Set Variable test-model + ${MODEL_CREATED} Set Variable ${FALSE} # robocop: off=unused-variable + ${RUNTIME_NAME} Set Variable Model Serving Test # robocop: off=unused-variable + ${INFERENCE_INPUT} Set Variable @tests/Resources/Files/modelmesh-mnist-input.json # robocop: off=unused-variable # robocop: disable:line-too-long + ${INFERENCE_INPUT_OPENVINO} Set Variable + ... @tests/Resources/Files/openvino-example-input.json + ${EXPECTED_INFERENCE_OUTPUT_OPENVINO} Set Variable + ... 
{"model_name":"test-model__isvc-8655dc7979","model_version":"1","outputs":[{"name":"Func/StatefulPartitionedCall/output/_13:0","datatype":"FP32","shape":[1,1],"data":[0.99999994]}]}ยบ # robocop: disable:line-too-long Fetch CA Certificate If RHODS Is Self-Managed Open Model Serving Home Page - Verify Model Status ${MODEL_NAME} success - Run Keyword And Continue On Failure Verify Model Inference ${MODEL_NAME} ${INFERENCE_INPUT_OPENVINO} ${EXPECTED_INFERENCE_OUTPUT_OPENVINO} token_auth=${FALSE} - Remove File openshift_ca.crt - [Teardown] Run oc delete project ${PRJ_TITLE} + Verify Model Status ${MODEL_NAME} success + Run Keyword And Continue On Failure + ... Verify Model Inference + ... ${MODEL_NAME} + ... ${INFERENCE_INPUT_OPENVINO} + ... ${EXPECTED_INFERENCE_OUTPUT_OPENVINO} + ... token_auth=${FALSE} + Remove File openshift_ca.crt + [Teardown] Run oc delete project ${PRJ_TITLE} Verify Custom Runtime Exists After Upgrade [Documentation] Test the inference result after having deployed a model that requires Token Authentication - [Tags] Upgrade - [Setup] Begin Web Test - Menu.Navigate To Page Settings Serving runtimes - Wait Until Page Contains Add serving runtime timeout=15s - Page Should Contain Element //tr[@id='caikit-runtime'] - Delete Serving Runtime Template From CLI By Runtime Name OR Display Name runtime_name=caikit-runtime - [Teardown] Dashboard Test Teardown + [Tags] Upgrade + [Setup] Begin Web Test + Menu.Navigate To Page Settings Serving runtimes + Wait Until Page Contains Add serving runtime timeout=15s + Page Should Contain Element //tr[@id='caikit-runtime'] + Delete Serving Runtime Template From CLI By Runtime Name OR Display Name + ... runtime_name=caikit-runtime + [Teardown] Dashboard Test Teardown Verify Ray Cluster Exists And Monitor Workload Metrics By Submitting Ray Job After Upgrade - [Documentation] check the Ray Cluster exists , submit ray job and verify resource usage after upgrade - [Tags] Upgrade - [Setup] Prepare Codeflare-SDK Test Setup - ${PRJ_UPGRADE} Set Variable test-ns-rayupgrade - ${LOCAL_QUEUE} Set Variable local-queue-mnist - ${JOB_NAME} Set Variable mnist - Run Codeflare-SDK Test upgrade raycluster_sdk_upgrade_test.py::TestMnistJobSubmit 3.11 ${RAY_CUDA_IMAGE_3.11} ${CODEFLARE-SDK-RELEASE-TAG} - Set Global Variable ${DW_PROJECT_CREATED} True - Set Library Search Order SeleniumLibrary + # robocop: off=too-long-test-case + # robocop: off=too-many-calls-in-test-case + [Documentation] check the Ray Cluster exists , submit ray job and verify resource usage after upgrade + [Tags] Upgrade + [Setup] Prepare Codeflare-SDK Test Setup + ${PRJ_UPGRADE} Set Variable test-ns-rayupgrade + ${LOCAL_QUEUE} Set Variable local-queue-mnist + ${JOB_NAME} Set Variable mnist + Run Codeflare-SDK Test + ... upgrade + ... raycluster_sdk_upgrade_test.py::TestMnistJobSubmit + ... 3.11 + ... ${RAY_CUDA_IMAGE_3.11} + ... ${CODEFLARE-SDK-RELEASE-TAG} + Set Global Variable ${DW_PROJECT_CREATED} True # robocop: disable:replace-set-variable-with-var + Set Library Search Order SeleniumLibrary RHOSi Setup - Launch Dashboard ${TEST_USER.USERNAME} ${TEST_USER.PASSWORD} ${TEST_USER.AUTH_TYPE} - ... ${ODH_DASHBOARD_URL} ${BROWSER.NAME} ${BROWSER.OPTIONS} + Launch Dashboard + ... ${TEST_USER.USERNAME} + ... ${TEST_USER.PASSWORD} + ... ${TEST_USER.AUTH_TYPE} + ... ${ODH_DASHBOARD_URL} + ... ${BROWSER.NAME} + ... 
${BROWSER.OPTIONS} Open Distributed Workload Metrics Home Page - Select Distributed Workload Project By Name ${PRJ_UPGRADE} - Select Refresh Interval 15 seconds - Wait Until Element Is Visible ${DISTRIBUITED_WORKLOAD_RESOURCE_METRICS_TITLE_XP} timeout=20 - Wait Until Element Is Visible xpath=//*[text()="Running"] timeout=30 - - ${cpu_requested} = Get CPU Requested ${PRJ_UPGRADE} ${LOCAL_QUEUE} - ${memory_requested} = Get Memory Requested ${PRJ_UPGRADE} ${LOCAL_QUEUE} RayCluster - Check Requested Resources Chart ${PRJ_UPGRADE} ${cpu_requested} ${memory_requested} - Check Requested Resources ${PRJ_UPGRADE} ${CPU_SHARED_QUOTA} - ... ${MEMEORY_SHARED_QUOTA} ${cpu_requested} ${memory_requested} RayCluster - - Check Distributed Workload Resource Metrics Status ${JOB_NAME} Running - Check Distributed Worklaod Status Overview ${JOB_NAME} Running - ... All pods were ready or succeeded since the workload admission - - Click Button ${PROJECT_METRICS_TAB_XP} - Check Distributed Workload Resource Metrics Chart ${PRJ_UPGRADE} ${cpu_requested} - ... ${memory_requested} RayCluster ${JOB_NAME} - - [Teardown] Run Keywords Cleanup Codeflare-SDK Setup AND - ... Codeflare Upgrade Tests Teardown ${PRJ_UPGRADE} ${DW_PROJECT_CREATED} + Select Distributed Workload Project By Name ${PRJ_UPGRADE} + Select Refresh Interval 15 seconds + Wait Until Element Is Visible + ... ${DISTRIBUITED_WORKLOAD_RESOURCE_METRICS_TITLE_XP} + ... timeout=20 + Wait Until Element Is Visible xpath=//*[text()="Running"] timeout=30 + + ${cpu_requested} Get CPU Requested ${PRJ_UPGRADE} ${LOCAL_QUEUE} + ${memory_requested} Get Memory Requested ${PRJ_UPGRADE} ${LOCAL_QUEUE} RayCluster + Check Requested Resources Chart ${PRJ_UPGRADE} ${cpu_requested} ${memory_requested} + Check Requested Resources + ... ${PRJ_UPGRADE} + ... ${CPU_SHARED_QUOTA} + ... ${MEMEORY_SHARED_QUOTA} + ... ${cpu_requested} + ... ${memory_requested} + ... RayCluster + + Check Distributed Workload Resource Metrics Status ${JOB_NAME} Running + Check Distributed Worklaod Status Overview ${JOB_NAME} Running + ... All pods were ready or succeeded since the workload admission + + Click Button ${PROJECT_METRICS_TAB_XP} + Check Distributed Workload Resource Metrics Chart ${PRJ_UPGRADE} ${cpu_requested} + ... ${memory_requested} RayCluster ${JOB_NAME} + + [Teardown] Run Keywords Cleanup Codeflare-SDK Setup AND + ... 
Codeflare Upgrade Tests Teardown ${PRJ_UPGRADE} ${DW_PROJECT_CREATED} Run Training Operator ODH Run PyTorchJob Test Use Case [Documentation] Run Training Operator ODH Run PyTorchJob Test Use Case - [Tags] Upgrade - [Setup] Prepare Training Operator E2E Upgrade Test Suite - Run Training Operator ODH Upgrade Test TestRunPytorchjob - [Teardown] Teardown Training Operator E2E Upgrade Test Suite + [Tags] Upgrade + [Setup] Prepare Training Operator E2E Upgrade Test Suite + Run Training Operator ODH Upgrade Test TestRunPytorchjob + [Teardown] Teardown Training Operator E2E Upgrade Test Suite Run Training Operator ODH Run Sleep PyTorchJob Test Use Case [Documentation] Verify that running PyTorchJob Pod wasn't restarted - [Tags] Upgrade - [Setup] Prepare Training Operator E2E Upgrade Test Suite - Run Training Operator ODH Upgrade Test TestVerifySleepPytorchjob - [Teardown] Teardown Training Operator E2E Upgrade Test Suite + [Tags] Upgrade + [Setup] Prepare Training Operator E2E Upgrade Test Suite + Run Training Operator ODH Upgrade Test TestVerifySleepPytorchjob + [Teardown] Teardown Training Operator E2E Upgrade Test Suite Verify that the must-gather image provides RHODS logs and info - [Documentation] Tests the must-gather image for ODH/RHOAI after upgrading - [Tags] Upgrade - Get must-gather Logs - Verify logs for ${APPLICATIONS_NAMESPACE} - IF "${PRODUCT}" == "RHODS" + [Documentation] Tests the must-gather image for ODH/RHOAI after upgrading + [Tags] Upgrade + Get Must-Gather Logs + Verify Logs For ${APPLICATIONS_NAMESPACE} + IF "${PRODUCT}" == "RHODS" Verify Logs For ${OPERATOR_NAMESPACE} - Run Keyword If RHODS Is Managed Verify logs for ${MONITORING_NAMESPACE} + Run Keyword If RHODS Is Managed Verify Logs For ${MONITORING_NAMESPACE} END - [Teardown] Cleanup must-gather Logs + [Teardown] Cleanup Must-Gather Logs -Verify That DSC And DSCI Release.Name Attribute matches ${expected_release_name} +Verify That DSC And DSCI Release.Name Attribute matches ${expected_release_name} # robocop: disable:not-allowed-char-in-name [Documentation] Tests the release.name attribute from the DSC and DSCI matches the desired value. - ... ODH: Open Data Hub - ... RHOAI managed: OpenShift AI Cloud Service - ... RHOAI selfmanaged: OpenShift AI Self-Managed - [Tags] Upgrade - Should Be Equal As Strings ${DSC_RELEASE_NAME} ${expected_release_name} - Should Be Equal As Strings ${DSCI_RELEASE_NAME} ${expected_release_name} - -Verify That DSC And DSCI Release.Version Attribute matches the value in the subscription + ... ODH: Open Data Hub + ... RHOAI managed: OpenShift AI Cloud Service + ... RHOAI selfmanaged: OpenShift AI Self-Managed + [Tags] Upgrade + Should Be Equal As Strings ${DSC_RELEASE_NAME} ${expected_release_name} + Should Be Equal As Strings ${DSCI_RELEASE_NAME} ${expected_release_name} + +Verify That DSC And DSCI Release.Version Attribute matches the value in the subscription # robocop: disable:not-allowed-char-in-name [Documentation] Tests the release.version attribute from the DSC and DSCI matches the value in the subscription. - [Tags] Upgrade - ${rc} ${csv_name}= Run And Return Rc And Output - ... oc get subscription -n ${OPERATOR_NAMESPACE} -l ${OPERATOR_SUBSCRIPTION_LABEL} -ojson | jq '.items[0].status.currentCSV' | tr -d '"' - - Should Be Equal As Integers ${rc} ${0} ${rc} - - ${csv_version}= Get Resource Attribute ${OPERATOR_NAMESPACE} - ... 
ClusterServiceVersion    ${csv_name}    .spec.version
-
-    Should Be Equal As Strings    ${DSC_RELEASE_VERSION}    ${csv_version}
-    Should Be Equal As Strings    ${DSCI_RELEASE_VERSION}    ${csv_version}
+    [Tags]    Upgrade
+    ${rc}    ${csv_name}    Run And Return Rc And Output
+    ...    oc get subscription -n ${OPERATOR_NAMESPACE} -l ${OPERATOR_SUBSCRIPTION_LABEL} -ojson | jq '.items[0].status.currentCSV' | tr -d '"'    # robocop: disable:line-too-long
+    Should Be Equal As Integers    ${rc}    ${0}    ${rc}
+    ${csv_version}    Get Resource Attribute    ${OPERATOR_NAMESPACE}
+    ...    ClusterServiceVersion    ${csv_name}    .spec.version
+    Should Be Equal As Strings    ${DSC_RELEASE_VERSION}    ${csv_version}
+    Should Be Equal As Strings    ${DSCI_RELEASE_VERSION}    ${csv_version}

 Data Science Pipelines Post Upgrade Verifications
     [Documentation]    Verifies the status of the resources created in project dsp-test-upgrade after the upgradea
-    [Tags]    Upgrade    DataSciencePipelines-Backend
-    Skip If Operator Starting Version Is Not Supported    minimum_version=2.14.0
+    [Tags]    Upgrade    DataSciencePipelines-Backend
+    Skip If Operator Starting Version Is Not Supported    minimum_version=2.14.0
     DataSciencePipelinesUpgradeTesting.Verify Resources After Upgrade

 Model Registry Post Upgrade Verification
     [Documentation]    Verifies that registered model/version in pre-upgrade is present after the upgrade
-    [Tags]    Upgrade    ModelRegistryUpgrade
-    ...    ProductBug    RHOAIENG-15033
-    Skip If Operator Starting Version Is Not Supported    minimum_version=2.14.0
+    [Tags]    Upgrade    ModelRegistryUpgrade    ProductBug    RHOAIENG-15033
+    Skip If Operator Starting Version Is Not Supported    minimum_version=2.14.0
     Model Registry Post Upgrade Scenario
-    [Teardown]    Post Upgrade Scenario Teardown
+    [Teardown]    Post Upgrade Scenario Teardown

 *** Keywords ***
 Dashboard Suite Setup
-    [Documentation]    Basic suite setup
+    [Documentation]    Basic suite setup
     Set Library Search Order    SeleniumLibrary
     RHOSi Setup

 Dashboard Test Teardown
-    [Documentation]    Basic suite Teradown
+    [Documentation]    Basic suite Teardown
     IF    not ${IS_SELF_MANAGED}    Managed RHOAI Upgrade Test Teardown
     Close All Browsers

 Get Dashboard Config Data
-    [Documentation]    Get OdhDashboardConfig CR data
-    ${payload}    Oc Get    kind=OdhDashboardConfig    namespace=${APPLICATIONS_NAMESPACE}
+    [Documentation]    Get OdhDashboardConfig CR data
+    ${payload}    Oc Get    kind=OdhDashboardConfig    namespace=${APPLICATIONS_NAMESPACE}
     ...    field_selector=metadata.name==odh-dashboard-config
-    Set Suite Variable    ${payload}    #robocop:disable
+    Set Suite Variable    ${payload}    # robocop:disable

 Set Default Users
-    [Documentation]    Set Default user settings
+    [Documentation]    Set Default user settings
     Set Standard RHODS Groups Variables
     Set Default Access Groups Settings
     IF    not ${IS_SELF_MANAGED}    Managed RHOAI Upgrade Test Teardown

 Delete OOTB Image
-    [Documentation]    Delete the Custom notbook create
-    ${status}    Run Keyword And Return Status    Oc Delete    kind=ImageStream    name=byon-upgrade    namespace=${APPLICATIONS_NAMESPACE}    #robocop:disable
-    IF    not ${status}    Fail    Notebook image is deleted after the upgrade
-    IF    not ${IS_SELF_MANAGED}    Managed RHOAI Upgrade Test Teardown
+    [Documentation]    Delete the Custom notebook created
+    # robocop:disable
+    ${status}    Run Keyword And Return Status
+    ...    Oc Delete
+    ...    kind=ImageStream
+    ...    name=byon-upgrade
+    ...
namespace=${APPLICATIONS_NAMESPACE} + IF not ${status} Fail Notebook image is deleted after the upgrade + IF not ${IS_SELF_MANAGED} Managed RHOAI Upgrade Test Teardown Managed RHOAI Upgrade Test Teardown + # robocop: off=too-many-calls-in-keyword [Documentation] Check rhods_aggregate_availability metric when RHOAI is installed as managed - ${expression} = Set Variable rhods_aggregate_availability&step=1 - ${resp} = Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} + ${expression} Set Variable rhods_aggregate_availability&step=1 + ${resp} Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} Log rhods_aggregate_availability: ${resp.json()["data"]["result"][0]["value"][-1]} - @{list_values} = Create List 1 - Run Keyword And Warn On Failure Should Contain ${list_values} ${resp.json()["data"]["result"][0]["value"][-1]} - ${expression} = Set Variable rhods_aggregate_availability{name="rhods-dashboard"}&step=1 - ${resp} = Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} + @{list_values} Create List 1 # robocop: disable:replace-create-with-var + Run Keyword And Warn On Failure + ... Should Contain + ... ${list_values} + ... ${resp.json()["data"]["result"][0]["value"][-1]} + ${expression} Set Variable rhods_aggregate_availability{name="rhods-dashboard"}&step=1 + ${resp} Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} Log rhods_aggregate_availability: ${resp.json()["data"]["result"][0]["value"][-1]} - @{list_values} = Create List 1 - Run Keyword And Warn On Failure Should Contain ${list_values} ${resp.json()["data"]["result"][0]["value"][-1]} - ${expression} = Set Variable rhods_aggregate_availability{name="notebook-spawner"}&step=1 - ${resp} = Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} + @{list_values} Create List 1 # robocop: disable:replace-create-with-var + Run Keyword And Warn On Failure + ... Should Contain + ... ${list_values} + ... ${resp.json()["data"]["result"][0]["value"][-1]} + ${expression} Set Variable rhods_aggregate_availability{name="notebook-spawner"}&step=1 + ${resp} Prometheus.Run Query ${RHODS_PROMETHEUS_URL} ${RHODS_PROMETHEUS_TOKEN} ${expression} Log rhods_aggregate_availability: ${resp.json()["data"]["result"][0]["value"][-1]} - @{list_values} = Create List 1 - Run Keyword And Warn On Failure Should Contain ${list_values} ${resp.json()["data"]["result"][0]["value"][-1]} + @{list_values} Create List 1 # robocop: disable:replace-create-with-var + Run Keyword And Warn On Failure + ... Should Contain + ... ${list_values} + ... ${resp.json()["data"]["result"][0]["value"][-1]} Upgrade Suite Setup [Documentation] Set of action to run as Suite setup RHOSi Setup - ${IS_SELF_MANAGED}= Is RHODS Self-Managed - Set Suite Variable ${IS_SELF_MANAGED} + ${IS_SELF_MANAGED} Is RHODS Self-Managed + Set Suite Variable ${IS_SELF_MANAGED} # robocop: disable:replace-set-variable-with-var Gather Release Attributes From DSC And DSCI Set Expected Value For Release Name