diff --git a/alembic/versions/3189c039e59b_removing_unnecessary_testrunconfig.py b/alembic/versions/3189c039e59b_removing_unnecessary_testrunconfig.py new file mode 100644 index 00000000..1d4b9f82 --- /dev/null +++ b/alembic/versions/3189c039e59b_removing_unnecessary_testrunconfig.py @@ -0,0 +1,65 @@ +"""Removing unnecessary TestRunConfig + +Revision ID: 3189c039e59b +Revises: 96ee37627a48 +Create Date: 2023-11-22 17:52:57.970522 + +""" +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "3189c039e59b" +down_revision = "96ee37627a48" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + "fk_testrunexecution_test_run_config_id_testrunconfig", + "testrunexecution", + type_="foreignkey", + ) + op.drop_column("testrunexecution", "test_run_config_id") + op.drop_index("ix_testrunconfig_id", table_name="testrunconfig") + op.drop_table("testrunconfig") + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "testrunconfig", + sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False), + sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column("dut_name", sa.VARCHAR(), autoincrement=False, nullable=False), + sa.Column( + "created_at", postgresql.TIMESTAMP(), autoincrement=False, nullable=False + ), + sa.Column( + "selected_tests", + postgresql.JSON(astext_type=sa.Text()), + autoincrement=False, + nullable=False, + ), + sa.PrimaryKeyConstraint("id", name="pk_testrunconfig"), + ) + op.create_index("ix_testrunconfig_id", "testrunconfig", ["id"], unique=False) + op.add_column( + "testrunexecution", + sa.Column( + "test_run_config_id", sa.INTEGER(), autoincrement=False, nullable=True + ), + ) + op.create_foreign_key( + "fk_testrunexecution_test_run_config_id_testrunconfig", + "testrunexecution", + "testrunconfig", + ["test_run_config_id"], + ["id"], + ) + # ### end Alembic commands ### diff --git a/app/api/api_v1/api.py b/app/api/api_v1/api.py index cb82e010..f0d6cc93 100644 --- a/app/api/api_v1/api.py +++ b/app/api/api_v1/api.py @@ -21,7 +21,6 @@ projects, test_collections, test_harness_backend_version, - test_run_configs, test_run_executions, utils, ) @@ -39,9 +38,6 @@ prefix="/test_run_executions", tags=["test_run_executions"], ) -api_router.include_router( - test_run_configs.router, prefix="/test_run_configs", tags=["test_run_configs"] -) api_router.include_router(test_harness_backend_version.router, tags=["version"]) api_router.include_router(utils.router, prefix="/utils", tags=["utils"]) diff --git a/app/api/api_v1/endpoints/test_run_configs.py b/app/api/api_v1/endpoints/test_run_configs.py deleted file mode 100644 index 3e95a1b2..00000000 --- a/app/api/api_v1/endpoints/test_run_configs.py +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright (c) 2023 Project CHIP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
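Reviewer note: the migration above is fully reversible. Under standard Alembic usage it can be exercised in both directions from Python as well as from the CLI; the "alembic.ini" path below is an assumption about this repo's layout.

```python
# Sketch: round-trip the migration with Alembic's Python API.
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumed config location; adjust if the repo differs
command.upgrade(cfg, "3189c039e59b")    # drops testrunconfig and the FK column
command.downgrade(cfg, "96ee37627a48")  # recreates both
```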
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from http import HTTPStatus -from typing import Any, List - -from fastapi import APIRouter, Depends, HTTPException -from sqlalchemy.orm import Session - -from app import crud, schemas -from app.db.session import get_db -from app.test_engine.test_script_manager import TestNotFound - -router = APIRouter() - - -@router.get("/", response_model=List[schemas.TestRunConfig], deprecated=True) -def read_test_run_configs( - db: Session = Depends(get_db), - skip: int = 0, - limit: int = 100, -) -> Any: - """ - Retrieve test_run_configs. - """ - return crud.test_run_config.get_multi(db, skip=skip, limit=limit) - - -@router.post("/", response_model=schemas.TestRunConfig, deprecated=True) -def create_test_run_config( - *, - db: Session = Depends(get_db), - test_run_config_in: schemas.TestRunConfigCreate, -) -> Any: - """ - Create new test run config. - """ - try: - return crud.test_run_config.create(db=db, obj_in=test_run_config_in) - except TestNotFound as e: - raise HTTPException( - status_code=HTTPStatus.UNPROCESSABLE_ENTITY, - detail=f"Invalid test selection: {e}", - ) - - -@router.put("/{id}", response_model=schemas.TestRunConfig, deprecated=True) -def update_test_run_config( - *, - db: Session = Depends(get_db), - id: int, - test_run_config_in: schemas.TestRunConfigUpdate, -) -> Any: - """ - Update a test run config. - """ - test_run_config = crud.test_run_config.get(db=db, id=id) - if not test_run_config: - raise HTTPException( - status_code=HTTPStatus.NOT_FOUND, detail="Test run config not found" - ) - - test_run_config = crud.test_run_config.update( - db=db, db_obj=test_run_config, obj_in=test_run_config_in - ) - return test_run_config - - -@router.get("/{id}", response_model=schemas.TestRunConfig, deprecated=True) -def read_test_run_config( - *, - db: Session = Depends(get_db), - id: int, -) -> Any: - """ - Get test run config by ID. - """ - test_run_config = crud.test_run_config.get(db=db, id=id) - if not test_run_config: - raise HTTPException( - status_code=HTTPStatus.NOT_FOUND, detail="Test run config not found" - ) - - return test_run_config diff --git a/app/api/api_v1/endpoints/test_run_executions.py b/app/api/api_v1/endpoints/test_run_executions.py index 33184224..4d194628 100644 --- a/app/api/api_v1/endpoints/test_run_executions.py +++ b/app/api/api_v1/endpoints/test_run_executions.py @@ -82,9 +82,8 @@ def create_test_run_execution( """ Create new test run execution. 
""" - test_run_execution_in.selected_tests = selected_tests - test_run_execution = crud.test_run_execution.create( - db=db, obj_in=test_run_execution_in + test_run_execution = crud.test_run_execution.create_with_selected_tests( + db=db, obj_in=test_run_execution_in, selected_tests=selected_tests ) return test_run_execution @@ -259,13 +258,12 @@ def repeat_test_run_execution( test_run_execution_in.description = execution_to_repeat.description test_run_execution_in.project_id = execution_to_repeat.project_id test_run_execution_in.operator_id = execution_to_repeat.operator_id - test_run_execution_in.selected_tests = selected_tests_from_execution( - execution_to_repeat - ) - # TODO: Remove test_run_config completely from the project - test_run_execution_in.test_run_config_id = None - return crud.test_run_execution.create(db=db, obj_in=test_run_execution_in) + return crud.test_run_execution.create_with_selected_tests( + db=db, + obj_in=test_run_execution_in, + selected_tests=selected_tests_from_execution(execution_to_repeat), + ) @router.delete("/{id}", response_model=schemas.TestRunExecutionInDBBase) diff --git a/app/crud/__init__.py b/app/crud/__init__.py index b7269fb8..713e6956 100644 --- a/app/crud/__init__.py +++ b/app/crud/__init__.py @@ -18,7 +18,6 @@ from .crud_project import project from .crud_test_case_execution import test_case_execution from .crud_test_case_metadata import test_case_metadata -from .crud_test_run_config import test_run_config from .crud_test_run_execution import test_run_execution from .crud_test_step_execution import test_step_execution from .crud_test_suite_execution import test_suite_execution diff --git a/app/crud/crud_test_run_config.py b/app/crud/crud_test_run_config.py deleted file mode 100644 index 2f985697..00000000 --- a/app/crud/crud_test_run_config.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2023 Project CHIP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from fastapi.encoders import jsonable_encoder -from sqlalchemy.orm import Session - -from app.crud.base import CRUDBase, CRUDOperationNotSupported -from app.models.test_run_config import TestRunConfig -from app.schemas.test_run_config import TestRunConfigCreate, TestRunConfigUpdate -from app.test_engine.test_script_manager import test_script_manager - - -class CRUDTestRunConfig( - CRUDBase[TestRunConfig, TestRunConfigCreate, TestRunConfigUpdate] -): - # We overrite the create method, to add validation of valid selected tests - def create(self, db: Session, *, obj_in: TestRunConfigCreate) -> TestRunConfig: - test_script_manager.validate_test_selection(obj_in.selected_tests) - obj_in_data = jsonable_encoder(obj_in) - db_obj = TestRunConfig(**obj_in_data) - db.add(db_obj) - db.commit() - db.refresh(db_obj) - return db_obj - - def remove(self, db: Session, *, id: int) -> TestRunConfig: - raise CRUDOperationNotSupported("You cannot remove Test Run Config") - - -test_run_config = CRUDTestRunConfig(TestRunConfig) diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index 52965809..f73127da 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -22,11 +22,10 @@ from app.crud import operator as crud_operator from app.crud import project as crud_project -from app.crud import test_run_config as crud_test_run_config from app.crud.base import CRUDBaseCreate, CRUDBaseDelete, CRUDBaseRead from app.models import Project, TestCaseExecution, TestRunExecution, TestSuiteExecution from app.schemas import ( - TestRunConfigCreate, + SelectedTests, TestRunExecutionToExport, TestRunExecutionToImport, ) @@ -176,9 +175,22 @@ def create( test_run_execution = super().create(db=db, obj_in=obj_in) + db.commit() + db.refresh(test_run_execution) + return test_run_execution + + def create_with_selected_tests( + self, + db: Session, + obj_in: TestRunExecutionCreate, + selected_tests: SelectedTests, + **kwargs: Optional[dict], + ) -> TestRunExecution: + test_run_execution = self.create(db, obj_in=obj_in, **kwargs) + test_suites = ( test_script_manager.pending_test_suite_executions_for_selected_tests( - obj_in.selected_tests + selected_tests ) ) @@ -258,12 +270,6 @@ def import_execution( ) imported_execution.operator_id = operator_id - if execution.test_run_config: - test_run_config = crud_test_run_config.create( - db=db, obj_in=TestRunConfigCreate(**execution.test_run_config.__dict__) - ) - imported_execution.test_run_config_id = test_run_config.id - imported_model = TestRunExecution(**jsonable_encoder(imported_execution)) db.add(imported_model) diff --git a/app/db/base.py b/app/db/base.py index 914083c0..1863b033 100644 --- a/app/db/base.py +++ b/app/db/base.py @@ -20,7 +20,6 @@ from app.models.project import Project # noqa from app.models.test_case_execution import TestCaseExecution # noqa from app.models.test_case_metadata import TestCaseMetadata # noqa -from app.models.test_run_config import TestRunConfig # noqa from app.models.test_run_execution import TestRunExecution # noqa from app.models.test_step_execution import TestStepExecution # noqa from app.models.test_suite_execution import TestSuiteExecution # noqa diff --git a/app/models/__init__.py b/app/models/__init__.py index d99654e9..3be70fc9 100644 --- a/app/models/__init__.py +++ b/app/models/__init__.py @@ -18,7 +18,6 @@ from .test_case_execution import TestCaseExecution from .test_case_metadata import TestCaseMetadata from .test_enums import TestStateEnum -from .test_run_config import 
TestRunConfig from .test_run_execution import TestRunExecution from .test_step_execution import TestStepExecution from .test_suite_execution import TestSuiteExecution diff --git a/app/models/test_run_config.py b/app/models/test_run_config.py deleted file mode 100644 index b634d3df..00000000 --- a/app/models/test_run_config.py +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright (c) 2023 Project CHIP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from datetime import datetime -from typing import TYPE_CHECKING - -from sqlalchemy import JSON -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from app.db.base_class import Base - -if TYPE_CHECKING: - from .test_run_execution import TestRunExecution # noqa: F401 - - -class TestRunConfig(Base): - __test__ = False # Needed to indicate to PyTest that this is not a "test" - - id: Mapped[int] = mapped_column(primary_key=True, index=True) - name: Mapped[str] = mapped_column(nullable=False) - dut_name: Mapped[str] = mapped_column(nullable=False) - created_at: Mapped[datetime] = mapped_column(default=datetime.now) - - selected_tests: Mapped[dict] = mapped_column(JSON, default={}, nullable=False) - - test_run_executions: Mapped[list["TestRunExecution"]] = relationship( - "TestRunExecution", back_populates="test_run_config", uselist=True - ) diff --git a/app/models/test_run_execution.py b/app/models/test_run_execution.py index cd938723..9620b17d 100644 --- a/app/models/test_run_execution.py +++ b/app/models/test_run_execution.py @@ -31,7 +31,6 @@ if TYPE_CHECKING: from .operator import Operator # noqa: F401 from .project import Project # noqa: F401 - from .test_run_config import TestRunConfig # noqa: F401 class TestRunExecution(Base): @@ -53,12 +52,6 @@ class TestRunExecution(Base): description: Mapped[Optional[str]] = mapped_column(default=None, nullable=True) - test_run_config_id: Mapped[Optional[int]] = mapped_column( - ForeignKey("testrunconfig.id"), nullable=True - ) - test_run_config: Mapped["TestRunConfig"] = relationship( - "TestRunConfig", back_populates="test_run_executions" - ) log: Mapped[list[TestRunLogEntry]] = mapped_column( MutableList.as_mutable(PydanticListType(TestRunLogEntry)), default=[], diff --git a/app/schemas/__init__.py b/app/schemas/__init__.py index 77ce94ca..c1f88e44 100644 --- a/app/schemas/__init__.py +++ b/app/schemas/__init__.py @@ -30,12 +30,6 @@ from .test_collections import TestCollections from .test_environment_config import TestEnvironmentConfig from .test_harness_backend_version import TestHarnessBackendVersion -from .test_run_config import ( - TestRunConfig, - TestRunConfigCreate, - TestRunConfigInDB, - TestRunConfigUpdate, -) from .test_run_execution import ( ExportedTestRunExecution, TestRunExecution, diff --git a/app/schemas/test_run_execution.py b/app/schemas/test_run_execution.py index 817a89b9..29d4abd2 100644 --- a/app/schemas/test_run_execution.py +++ b/app/schemas/test_run_execution.py @@ -19,10 +19,8 @@ from pydantic import BaseModel from app.models.test_enums import TestStateEnum -from 
app.schemas.test_selection import SelectedTests from .operator import Operator, OperatorToExport -from .test_run_config import TestRunConfigToExport from .test_run_log_entry import TestRunLogEntry from .test_suite_execution import TestSuiteExecution, TestSuiteExecutionToExport @@ -44,7 +42,6 @@ class TestRunExecutionBase(BaseModel): # Base + properties that represent relationhips class TestRunExecutionBaseWithRelationships(TestRunExecutionBase): - test_run_config_id: Optional[int] project_id: Optional[int] @@ -52,7 +49,6 @@ class TestRunExecutionBaseWithRelationships(TestRunExecutionBase): class TestRunExecutionCreate(TestRunExecutionBaseWithRelationships): # TODO(#124): Require project ID when UI supports project management. operator_id: Optional[int] - selected_tests: Optional[SelectedTests] # Properties shared by models stored in DB @@ -107,7 +103,6 @@ class Config: # Schema used to export test run executions class TestRunExecutionToExport(TestRunExecutionExportImportBase): operator: Optional[OperatorToExport] - test_run_config: Optional[TestRunConfigToExport] # Schema used to export test run executions @@ -124,4 +119,3 @@ class TestRunExecutionToImport(TestRunExecutionExportImportBase): project_id: Optional[int] operator_id: Optional[int] imported_at: Optional[datetime] - test_run_config_id: Optional[int] diff --git a/app/test_engine/test_script_manager.py b/app/test_engine/test_script_manager.py index 372cfa86..097639c5 100644 --- a/app/test_engine/test_script_manager.py +++ b/app/test_engine/test_script_manager.py @@ -365,4 +365,4 @@ def __validate_test_case_selection_in_collection( ) -test_script_manager = TestScriptManager() +test_script_manager: TestScriptManager = TestScriptManager() diff --git a/app/tests/api/api_v1/test_test_run_configs.py b/app/tests/api/api_v1/test_test_run_configs.py deleted file mode 100644 index ada93e8b..00000000 --- a/app/tests/api/api_v1/test_test_run_configs.py +++ /dev/null @@ -1,108 +0,0 @@ -# -# Copyright (c) 2023 Project CHIP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from http import HTTPStatus - -import pytest -from fastapi.testclient import TestClient -from sqlalchemy.orm import Session - -from app.core.config import settings -from app.tests.utils.test_run_config import create_random_test_run_config - - -@pytest.mark.deprecated("The test_run_config is now deprecated along with this test") -def test_create_test_run_config(client: TestClient, db: Session) -> None: - data = { - "name": "Foo", - "dut_name": "Fighters", - "selected_tests": { - "sample_tests": { - "SampleTestSuite1": { - "TCSS1001": 1, - "TCSS1002": 2, - "TCSS1003": 2, - "TCSS1004": 5, - "TCSS1005": 8, - }, - }, - }, - } - response = client.post( - f"{settings.API_V1_STR}/test_run_configs/", - json=data, - ) - assert response.status_code == HTTPStatus.OK - content = response.json() - assert content["name"] == data["name"] - assert content["dut_name"] == data["dut_name"] - assert content["selected_tests"] == data["selected_tests"] - assert "id" in content - - -@pytest.mark.deprecated("The test_run_config is now deprecated along with this test") -def test_create_test_run_config_invalid_selection( - client: TestClient, db: Session -) -> None: - data = { - "name": "Foo", - "dut_name": "Fighters", - "selected_tests": { - "sample_tests": { - "SampleTestSuite1": { - "TCSS1001": 1, - "TCSS1002": 2, - "TCSS1003": 2, - "TCSS1004": 5, - "Invalid": 8, - }, - }, - }, - } - response = client.post( - f"{settings.API_V1_STR}/test_run_configs/", - json=data, - ) - assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY - content = response.json() - assert "detail" in content - - -@pytest.mark.deprecated("The test_run_config is now deprecated along with this test") -def test_read_test_run_config(client: TestClient, db: Session) -> None: - test_run_config = create_random_test_run_config(db) - response = client.get( - f"{settings.API_V1_STR}/test_run_configs/{test_run_config.id}", - ) - assert response.status_code == HTTPStatus.OK - content = response.json() - assert content["name"] == test_run_config.name - assert content["dut_name"] == test_run_config.dut_name - assert content["id"] == test_run_config.id - assert content["selected_tests"] == test_run_config.selected_tests - - -@pytest.mark.deprecated("The test_run_config is now deprecated along with this test") -def test_update_test_run_config(client: TestClient, db: Session) -> None: - test_run_config = create_random_test_run_config(db) - data = {"name": "Updated Name"} - response = client.put( - f"{settings.API_V1_STR}/test_run_configs/{test_run_config.id}", - json=data, - ) - assert response.status_code == HTTPStatus.OK - content = response.json() - assert content["name"] == data["name"] - assert content["id"] == test_run_config.id diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index 65941422..f354cdcc 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -31,6 +31,7 @@ from app.core.config import settings from app.models import TestRunExecution from app.models.test_enums import TestStateEnum +from app.schemas import SelectedTests from app.test_engine import ( TEST_ENGINE_ABORTING_TESTING_MESSAGE, TEST_ENGINE_BUSY_MESSAGE, @@ -41,7 +42,6 @@ from app.tests.test_engine.test_runner import load_test_run_for_test_cases from app.tests.utils.operator import create_random_operator from app.tests.utils.project import create_random_project -from app.tests.utils.test_run_config import create_random_test_run_config from 
app.tests.utils.test_run_execution import ( create_random_test_run_execution, create_random_test_run_execution_archived, @@ -64,32 +64,44 @@ def test_create_test_run_execution_with_selected_tests_succeeds( """ title = "Foo" description = random_lower_string() + selected_tests = { + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + {"public_id": "TCSS1003", "iterations": 2}, + {"public_id": "TCSS1004", "iterations": 5}, + {"public_id": "TCSS1005", "iterations": 8}, + ], + } + ], + } + ] + } json_data = { "test_run_execution_in": { "title": title, "description": description, }, - "selected_tests": { - "sample_tests": { - "SampleTestSuite1": { - "TCSS1001": 1, - "TCSS1002": 2, - "TCSS1003": 2, - "TCSS1004": 5, - "TCSS1005": 8, - }, - }, - }, + "selected_tests": selected_tests, } response = client.post( f"{settings.API_V1_STR}/test_run_executions/", json=json_data, ) - assert response.status_code == HTTPStatus.OK - content = response.json() - assert isinstance(content, dict) - assert content.get("title") == title - assert content.get("description") == description + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_content={ + "title": title, + "description": description, + }, + ) def test_create_test_run_execution_with_selected_tests_and_operator_succeeds( @@ -166,51 +178,6 @@ def test_create_test_run_execution_with_selected_tests_project_operator_succeeds assert response_operator["name"] == operator.name -def test_create_test_run_execution_with_test_run_config_and_selected_tests_succeeds( - client: TestClient, db: Session -) -> None: - """This test will create a new test run execution. A success is expected. - The selected tests are passed directly by JSON payload. - Also, one reference to a test run config is also included, but this is ignored by - the API by assigning None. 
- """ - - test_run_config = create_random_test_run_config(db) - title = "TestRunExecutionFoo" - description = random_lower_string() - json_data = { - "test_run_execution_in": { - "title": title, - "description": description, - "test_run_config_id": test_run_config.id, - }, - "selected_tests": { - "sample_tests": { - "SampleTestSuite1": { - "TCSS1001": 1, - "TCSS1002": 2, - "TCSS1003": 4, - "TCSS1004": 8, - "TCSS1005": 16, - }, - }, - }, - } - response = client.post( - f"{settings.API_V1_STR}/test_run_executions/", - json=json_data, - ) - validate_json_response( - response=response, - expected_status_code=HTTPStatus.OK, - expected_content={ - "title": title, - "description": description, - "test_run_config_id": None, - }, - ) - - def test_create_test_run_execution_with_selected_tests_with_two_suites_succeeds( client: TestClient, ) -> None: @@ -221,29 +188,41 @@ def test_create_test_run_execution_with_selected_tests_with_two_suites_succeeds( title = "TestRunExecutionFoo" description = random_lower_string() + selected_tests = { + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + {"public_id": "TCSS1003", "iterations": 4}, + {"public_id": "TCSS1004", "iterations": 8}, + {"public_id": "TCSS1005", "iterations": 16}, + ], + }, + { + "public_id": "SampleTestSuite2", + "test_cases": [ + {"public_id": "TCSS2001", "iterations": 1}, + {"public_id": "TCSS2002", "iterations": 2}, + {"public_id": "TCSS2003", "iterations": 4}, + {"public_id": "TCSS2004", "iterations": 8}, + {"public_id": "TCSS2005", "iterations": 16}, + ], + }, + ], + } + ] + } json_data = { "test_run_execution_in": { "title": title, "description": description, }, - "selected_tests": { - "sample_tests": { - "SampleTestSuite1": { - "TCSS1001": 1, - "TCSS1002": 2, - "TCSS1003": 4, - "TCSS1004": 8, - "TCSS1005": 16, - }, - "SampleTestSuite2": { - "TCSS2001": 1, - "TCSS2002": 2, - "TCSS2003": 4, - "TCSS2004": 8, - "TCSS2005": 16, - }, - }, - }, + "selected_tests": selected_tests, } response = client.post( f"{settings.API_V1_STR}/test_run_executions/", @@ -259,8 +238,10 @@ def test_create_test_run_execution_with_selected_tests_with_two_suites_succeeds( content = response.json() suites = content.get("test_suite_executions") returned_suites = [s["public_id"] for s in suites] - selected_tests = json_data["selected_tests"]["sample_tests"].keys() - for selected_suite in selected_tests: + selected_suites = [ + s["public_id"] for s in selected_tests["collections"][0]["test_suites"] + ] + for selected_suite in selected_suites: assert selected_suite in returned_suites @@ -301,13 +282,32 @@ def test_repeat_existing_test_run_execution_with_two_suites_succeeds( """ selected_tests = { - "sample_tests": { - "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3}, - "SampleTestSuite2": {"TCSS2004": 4, "TCSS2005": 5, "TCSS2006": 6}, - } + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + {"public_id": "TCSS1003", "iterations": 3}, + ], + }, + { + "public_id": "SampleTestSuite2", + "test_cases": [ + {"public_id": "TCSS2004", "iterations": 4}, + {"public_id": "TCSS2005", "iterations": 5}, + {"public_id": "TCSS2006", "iterations": 6}, + ], + }, + ], + } + ] } test_run_execution = create_random_test_run_execution( - 
db=db, selected_tests=selected_tests + db=db, selected_tests=SelectedTests(**selected_tests) ) base_title = remove_title_date(test_run_execution.title) @@ -328,7 +328,11 @@ def test_repeat_existing_test_run_execution_with_two_suites_succeeds( suites = content.get("test_suite_executions") returned_suites = [s["public_id"] for s in suites] - for selected_suite in selected_tests["sample_tests"].keys(): + selected_suites = [ + s["public_id"] for s in selected_tests["collections"][0]["test_suites"] + ] + + for selected_suite in selected_suites: assert selected_suite in returned_suites @@ -341,12 +345,24 @@ def test_repeat_existing_test_run_execution_with_title_succeeds( """ title = "TestRunExecutionFoo" selected_tests = { - "sample_tests": { - "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3} - } + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + {"public_id": "TCSS1003", "iterations": 3}, + ], + } + ], + } + ] } test_run_execution = create_random_test_run_execution( - db=db, selected_tests=selected_tests + db=db, selected_tests=SelectedTests(**selected_tests) ) url = f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/repeat" response = client.post(url + f"?title={title}") @@ -828,12 +844,24 @@ async def test_test_runner_status_running( async_client: AsyncClient, db: Session ) -> None: selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + } + ], + } + ] } - test_runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + test_runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) # Start running tests (async) run_task = asyncio.create_task(test_runner.run()) diff --git a/app/tests/crud/test_test_run_config.py b/app/tests/crud/test_test_run_config.py deleted file mode 100644 index a1eefacb..00000000 --- a/app/tests/crud/test_test_run_config.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (c) 2023 Project CHIP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
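Reviewer note: the deleted CRUD tests counted selected cases with `sum(...values())` on the flat dict; the rewritten test below does the same arithmetic on the nested shape with an inline comprehension. Generalized as a hypothetical helper:

```python
def total_iterations(selected_tests: dict) -> int:
    """Sum iteration counts across every collection, suite and case."""
    return sum(
        case["iterations"]
        for collection in selected_tests["collections"]
        for suite in collection["test_suites"]
        for case in suite["test_cases"]
    )
```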
-# -import pytest -from sqlalchemy.orm import Session - -from app import crud -from app.schemas.test_run_config import TestRunConfigUpdate -from app.tests.utils.test_run_config import create_random_test_run_config -from app.tests.utils.utils import random_lower_string - - -def test_create_test_run_config(db: Session) -> None: - # Create build new test_run_config - name = random_lower_string() - dut_name = random_lower_string() - selected_tests = { - "sample_tests": { - "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3} - } - } - test_run_config = create_random_test_run_config( - db=db, name=name, dut_name=dut_name, selected_tests=selected_tests - ) - - # assert created db values match - assert test_run_config.name == name - assert test_run_config.dut_name == dut_name - assert test_run_config.selected_tests == selected_tests - - -def test_get_test_run_config(db: Session) -> None: - # Create build new test_run_config - test_run_config = create_random_test_run_config(db=db) - - # load stored test_run_config from DB - stored_test_run_config = crud.test_run_config.get(db=db, id=test_run_config.id) - - # assert stored values match - assert stored_test_run_config - assert test_run_config.id == stored_test_run_config.id - assert test_run_config.name == stored_test_run_config.name - assert test_run_config.dut_name == stored_test_run_config.dut_name - assert test_run_config.selected_tests == stored_test_run_config.selected_tests - - -def test_update_test_run_config(db: Session) -> None: - # Create build new test_run_config - test_run_config = create_random_test_run_config(db=db) - - # Prepare an update: - name_update = random_lower_string() - test_run_config_update = TestRunConfigUpdate(name=name_update) - - # Perform update - test_run_config_updated = crud.test_run_config.update( - db=db, db_obj=test_run_config, obj_in=test_run_config_update - ) - - assert test_run_config_updated.id == test_run_config.id - assert test_run_config_updated.name == test_run_config.name - - -def test_delete_test_run_config(db: Session) -> None: - # Create build new test_run_config - test_run_config = create_random_test_run_config(db=db) - - # Assert that remove is not supported - with pytest.raises(crud.CRUDOperationNotSupported): - crud.test_run_config.remove(db=db, id=test_run_config.id) diff --git a/app/tests/crud/test_test_run_execution.py b/app/tests/crud/test_test_run_execution.py index 26299928..29db5a62 100644 --- a/app/tests/crud/test_test_run_execution.py +++ b/app/tests/crud/test_test_run_execution.py @@ -28,17 +28,13 @@ from app import crud, models, schemas from app.crud.crud_test_run_execution import ImportError from app.models.test_enums import TestStateEnum -from app.schemas.test_run_config import TestRunConfigCreate +from app.schemas import SelectedTests from app.schemas.test_run_execution import ( TestRunExecutionCreate, TestRunExecutionWithStats, ) from app.tests.utils.operator import operator_base_dict from app.tests.utils.project import create_random_project -from app.tests.utils.test_run_config import ( - random_test_run_config_dict, - test_run_config_base_dict, -) from app.tests.utils.test_run_execution import ( create_random_test_run_execution, create_random_test_run_execution_archived, @@ -63,7 +59,7 @@ def test_get_test_run_execution(db: Session) -> None: # Save create test_run_execution in DB test_run_execution = crud.test_run_execution.create( - db=db, obj_in=test_run_execution_in, selected_tests={} + db=db, obj_in=test_run_execution_in ) # load stored test_run_execution from DB 
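Reviewer note: with selection handling moved to `create_with_selected_tests`, the plain `create()` takes no selection argument and, per the CRUD change earlier in this diff, commits and refreshes before returning. A minimal sketch of the simplified call, assuming the usual `db` test fixture:

```python
from app import crud
from app.schemas.test_run_execution import TestRunExecutionCreate

test_run_execution_in = TestRunExecutionCreate(title="Smoke run")
execution = crud.test_run_execution.create(db=db, obj_in=test_run_execution_in)
assert execution.id is not None  # create() now commits and refreshes the row
```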
@@ -140,7 +136,7 @@ def test_delete_test_run_execution(db: Session) -> None: # Save create test_run_execution in DB test_run_execution = crud.test_run_execution.create( - db=db, obj_in=test_run_execution_in, selected_tests={} + db=db, obj_in=test_run_execution_in ) # Make sure DB session doesn't reuse models @@ -167,12 +163,25 @@ def test_delete_test_run_execution_with_a_test_suite(db: Session) -> None: test_run_execution_dict = random_test_run_execution_dict(title=title) test_run_execution_in = TestRunExecutionCreate(**test_run_execution_dict) + selected_tests = { + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [{"public_id": "TCSS1001", "iterations": 1}], + } + ], + } + ] + } # Save create test_run_execution in DB - test_run_execution = crud.test_run_execution.create( + test_run_execution = crud.test_run_execution.create_with_selected_tests( db=db, obj_in=test_run_execution_in, - selected_tests={"sample_tests": {"SampleTestSuite1": {"TCSS1001": 1}}}, + selected_tests=SelectedTests(**selected_tests), ) assert len(test_run_execution.test_suite_executions) == 1 suite = test_run_execution.test_suite_executions[0] @@ -277,100 +286,44 @@ def test_get_test_run_executions_archived_by_project(db: Session) -> None: assert not any(t.id == archived_test_run_execution.id for t in test_run_executions) -def test_create_test_run_execution_from_test_run_config(db: Session) -> None: - # Create build new test_run_config object - name = random_lower_string() - dut_name = random_lower_string() - first_test_suite_identifier = "SampleTestSuite1" - first_test_case_identifier = "TCSS1001" - - selected_tests = { - "sample_tests": { - first_test_suite_identifier: { - first_test_case_identifier: 1, - "TCSS1002": 2, - "TCSS1003": 3, - } - } - } - - total_test_case_count = sum( - selected_tests["sample_tests"][first_test_suite_identifier].values() - ) - test_run_config_dict = random_test_run_config_dict( - name=name, dut_name=dut_name, selected_tests=selected_tests - ) - - test_run_config_in = TestRunConfigCreate(**test_run_config_dict) - - # Save create test_run_config in DB - test_run_config = crud.test_run_config.create(db=db, obj_in=test_run_config_in) - - # Prepare data for test_run_execution - test_run_execution_title = "Test Execution title" - test_run_execution_data = TestRunExecutionCreate( - title=test_run_execution_title, test_run_config_id=test_run_config.id - ) - - test_run_execution = crud.test_run_execution.create( - db=db, obj_in=test_run_execution_data - ) - - # Assert direct properties - assert test_run_execution.title == test_run_execution_title - assert test_run_execution.test_run_config_id == test_run_config.id - - # Assert created test_suite_executions - test_suite_executions = test_run_execution.test_suite_executions - assert len(test_suite_executions) > 0 - - first_test_suite_execution = test_suite_executions[0] - test_case_executions = first_test_suite_execution.test_case_executions - assert len(test_case_executions) == total_test_case_count - - first_test_case_execution = test_case_executions[0] - assert first_test_case_execution.public_id == first_test_case_identifier - - remaining_test_cases = selected_tests["sample_tests"][first_test_suite_identifier] - for test_case_execution in test_case_executions: - public_id = test_case_execution.public_id - # Assert all test case public id's match - assert public_id in remaining_test_cases - remaining_test_cases[public_id] -= 1 - - # Assert the correct number of test 
cases where created - for _, missing_count in remaining_test_cases.items(): - assert missing_count == 0 - - def test_create_test_run_execution_from_selected_tests(db: Session) -> None: - first_test_suite_identifier = "SampleTestSuite1" first_test_case_identifier = "TCSS1001" selected_tests = { - "sample_tests": { - first_test_suite_identifier: { - first_test_case_identifier: 1, - "TCSS1002": 2, - "TCSS1003": 3, + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": first_test_case_identifier, "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + {"public_id": "TCSS1003", "iterations": 3}, + ], + } + ], } - } + ] } - total_test_case_count = sum( - selected_tests["sample_tests"][first_test_suite_identifier].values() - ) + values = [ + x["iterations"] + for x in selected_tests["collections"][0]["test_suites"][0]["test_cases"] + ] + total_test_case_count = sum(values) # Prepare data for test_run_execution test_run_execution_title = "Test Execution title" test_run_execution_data = TestRunExecutionCreate(title=test_run_execution_title) - test_run_execution = crud.test_run_execution.create( - db=db, obj_in=test_run_execution_data, selected_tests=selected_tests + test_run_execution = crud.test_run_execution.create_with_selected_tests( + db=db, + obj_in=test_run_execution_data, + selected_tests=SelectedTests(**selected_tests), ) # Assert direct properties assert test_run_execution.title == test_run_execution_title - assert test_run_execution.test_run_config_id is None # Assert created test_suite_executions test_suite_executions = test_run_execution.test_suite_executions @@ -383,16 +336,19 @@ def test_create_test_run_execution_from_selected_tests(db: Session) -> None: first_test_case_execution = test_case_executions[0] assert first_test_case_execution.public_id == first_test_case_identifier - remaining_test_cases = selected_tests["sample_tests"][first_test_suite_identifier] + remaining_test_cases = [ + x for x in selected_tests["collections"][0]["test_suites"][0]["test_cases"] + ] for test_case_execution in test_case_executions: public_id = test_case_execution.public_id # Assert all test case public id's match - assert public_id in remaining_test_cases - remaining_test_cases[public_id] -= 1 + assert public_id in [c["public_id"] for c in remaining_test_cases] + test_case = next(c for c in remaining_test_cases if c["public_id"] == public_id) + test_case["iterations"] -= 1 # Assert the correct number of test cases where created - for _, missing_count in remaining_test_cases.items(): - assert missing_count == 0 + for test_case in remaining_test_cases: + assert test_case["iterations"] == 0 def test_get_test_run_executions_by_search_query(db: Session) -> None: @@ -507,57 +463,7 @@ def test_import_execution_invalid_project_id() -> None: mocked_project_get.assert_called_once() -def test_import_execution_success_with_test_config() -> None: - mocked_db = mock.MagicMock() - - test_run_execution_dict = deepcopy(test_run_execution_base_dict) - test_run_execution_dict["operator"] = deepcopy(operator_base_dict) - test_run_execution_dict["test_run_config"] = deepcopy(test_run_config_base_dict) - - project_id = 42 - operator_id = 2 - test_run_config_id = 10 - operator_name = operator_base_dict.get("name") - - test_run_config_mock = models.TestRunConfig( - **test_run_config_base_dict, id=test_run_config_id - ) - - with mock.patch.object( - target=crud.operator, - attribute="get_or_create", - 
return_value=operator_id, - ) as mocked_get_or_create, mock.patch.object( - target=crud.test_run_config, - attribute="create", - return_value=test_run_config_mock, - ) as mocked_create_test_run_config: - imported_test_run = crud.test_run_execution.import_execution( - db=mocked_db, - project_id=project_id, - execution=schemas.TestRunExecutionToExport(**test_run_execution_dict), - ) - - mocked_get_or_create.assert_called_once_with( - db=mocked_db, name=operator_name, commit=False - ) - - mocked_create_test_run_config.assert_called_once_with( - db=mocked_db, - obj_in=TestRunConfigCreate(**test_run_execution_dict["test_run_config"]), - ) - - call.add(imported_test_run) in mocked_db.mock_calls - call.commit() in mocked_db.mock_calls - call.refresh(imported_test_run) in mocked_db.mock_calls - - assert imported_test_run.project_id == project_id - assert imported_test_run.title == test_run_execution_dict.get("title") - assert imported_test_run.operator_id == operator_id - assert imported_test_run.test_run_config_id == test_run_config_id - - -def test_import_execution_success_without_test_config() -> None: +def test_import_execution_success() -> None: mocked_db = mock.MagicMock() test_run_execution_dict = deepcopy(test_run_execution_base_dict) diff --git a/app/tests/test_engine/test_runner.py b/app/tests/test_engine/test_runner.py index ec2b4162..4204a5c1 100644 --- a/app/tests/test_engine/test_runner.py +++ b/app/tests/test_engine/test_runner.py @@ -20,6 +20,7 @@ from app import crud from app.models.test_enums import TestStateEnum +from app.schemas import SelectedTests from app.schemas.test_run_execution import TestRunExecutionCreate from app.test_engine.test_runner import ( AbortError, @@ -53,18 +54,34 @@ @pytest.mark.asyncio async def test_test_runner(db: Session) -> None: selected_tests = { - "sample_tests": { - "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2}, - "SampleTestSuite2": {"TCSS2003": 3}, - } + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + ], + }, + { + "public_id": "SampleTestSuite2", + "test_cases": [{"public_id": "TCSS2003", "iterations": 3}], + }, + ], + } + ] } # Prepare data for test_run_execution test_run_execution_title = "Test Execution title" test_run_execution_data = TestRunExecutionCreate(title=test_run_execution_title) - test_run_execution = crud.test_run_execution.create( - db=db, obj_in=test_run_execution_data, selected_tests=selected_tests + test_run_execution = crud.test_run_execution.create_with_selected_tests( + db=db, + obj_in=test_run_execution_data, + selected_tests=SelectedTests(**selected_tests), ) assert test_run_execution is not None @@ -99,16 +116,24 @@ async def test_test_runner(db: Session) -> None: @pytest.mark.asyncio async def test_test_runner_abort_in_memory(db: Session) -> None: selected_tests = { - "tool_unit_tests": { - "TestSuiteAsync": { - "TCTRNeverEnding": 1, - "TCTRInstantPass": 1, - }, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteAsync", + "test_cases": [ + {"public_id": "TCTRNeverEnding", "iterations": 1}, + {"public_id": "TCTRInstantPass", "iterations": 2}, + ], + } + ], + } + ] } test_run_execution = create_random_test_run_execution( - db=db, selected_tests=selected_tests + db=db, selected_tests=SelectedTests(**selected_tests) ) assert test_run_execution is not None @@ -169,16 +194,24 @@ async def 
test_test_runner_abort_in_memory(db: Session) -> None: @pytest.mark.asyncio async def test_test_runner_abort_db_sync(db: Session) -> None: selected_tests = { - "tool_unit_tests": { - "TestSuiteAsync": { - "TCTRNeverEnding": 1, - "TCTRInstantPass": 1, - }, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteAsync", + "test_cases": [ + {"public_id": "TCTRNeverEnding", "iterations": 1}, + {"public_id": "TCTRInstantPass", "iterations": 1}, + ], + } + ], + } + ] } test_run_execution = create_random_test_run_execution( - db=db, selected_tests=selected_tests + db=db, selected_tests=SelectedTests(**selected_tests) ) assert test_run_execution is not None @@ -341,17 +374,29 @@ async def test_test_runner_load__load_multiple_runs_simultaneously(db: Session) db (Session): Database fixture for creating test data. """ selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + } + ], + } + ] } - test_runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + test_runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) assert test_runner.state != TestRunnerState.IDLE # Create a 2nd test and attempt to load it test_run_execution = create_random_test_run_execution( - db=db, selected_tests=selected_tests + db=db, selected_tests=SelectedTests(**selected_tests) ) with pytest.raises(LoadingError): @@ -383,10 +428,18 @@ async def test_test_runner_non_existant_test_case(db: Session) -> None: db (Session): Database fixture for creating test data. 
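Reviewer note: `validate_test_selection` also takes the typed object now (see the test_script_manager tests further down in this diff). A hedged usage sketch for pre-validating an arbitrary selection, reusing the exception types those tests import:

```python
from app.schemas import SelectedTests
from app.test_engine.test_script_manager import (
    TestCaseNotFound,
    TestCollectionNotFound,
    TestSuiteNotFound,
    test_script_manager,
)


def is_valid_selection(selected_tests: SelectedTests) -> bool:
    """Return True when every selected collection, suite and case exists."""
    try:
        test_script_manager.validate_test_selection(selected_tests)
        return True
    except (TestCollectionNotFound, TestSuiteNotFound, TestCaseNotFound):
        return False
```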
""" selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCNonExistant": 1}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [{"public_id": "TCNonExistant", "iterations": 1}], + } + ], + } + ] } with pytest.raises(TestCaseNotFound): - load_test_run_for_test_cases(db=db, test_cases=selected_tests) + load_test_run_for_test_cases(db=db, test_cases=SelectedTests(**selected_tests)) diff --git a/app/tests/test_engine/test_runner_abort_testing.py b/app/tests/test_engine/test_runner_abort_testing.py index f6ba1163..a48b99a3 100644 --- a/app/tests/test_engine/test_runner_abort_testing.py +++ b/app/tests/test_engine/test_runner_abort_testing.py @@ -20,6 +20,7 @@ from sqlalchemy.orm import Session from app.models.test_enums import TestStateEnum +from app.schemas import SelectedTests from app.test_engine.test_runner import TestRunner from app.tests.utils.test_runner import ( get_test_case_for_public_id, @@ -215,8 +216,23 @@ async def test_abort_case_cleanup_2_tests_1_suite(db: Session) -> None: def __load_abort_tests(db: Session) -> Tuple[TestSuiteNeverEnding, TCNeverEnding]: test_suite_id = "TestSuiteNeverEnding" test_case_id = "TCNeverEnding" - selected_tests = {"tool_unit_tests": {test_suite_id: {test_case_id: 1}}} - test_runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + selected_tests = { + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": test_suite_id, + "test_cases": [{"public_id": test_case_id, "iterations": 1}], + } + ], + } + ] + } + + test_runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) run = test_runner.test_run assert run is not None @@ -243,12 +259,25 @@ def __load_abort_tests_2_suites( test_case_id_2 = "TCTRExpectedPass" selected_tests = { - "tool_unit_tests": { - test_suite_id_1: {test_case_id_1: 1}, - test_suite_id_2: {test_case_id_2: 1}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": test_suite_id_1, + "test_cases": [{"public_id": test_case_id_1, "iterations": 1}], + }, + { + "public_id": test_suite_id_2, + "test_cases": [{"public_id": test_case_id_2, "iterations": 1}], + }, + ], + } + ] } - test_runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + test_runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) run = test_runner.test_run assert run is not None @@ -283,11 +312,24 @@ def __load_abort_tests_2_tests_1_suite( test_case_id_2 = "TCNeverEndingV2" selected_tests = { - "tool_unit_tests": { - test_suite_id_1: {test_case_id_1: 1, test_case_id_2: 2}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": test_suite_id_1, + "test_cases": [ + {"public_id": test_case_id_1, "iterations": 1}, + {"public_id": test_case_id_2, "iterations": 2}, + ], + } + ], + } + ] } - test_runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + test_runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) run = test_runner.test_run assert run is not None diff --git a/app/tests/test_engine/test_runner_exceptions.py b/app/tests/test_engine/test_runner_exceptions.py index 54293e6c..0a26c2b2 100644 --- a/app/tests/test_engine/test_runner_exceptions.py +++ b/app/tests/test_engine/test_runner_exceptions.py @@ -19,6 +19,7 @@ from sqlalchemy.orm import Session from app.models.test_enums import 
TestStateEnum +from app.schemas import SelectedTests from app.test_engine.models import TestCase, TestSuite from app.tests.utils.test_runner import ( get_test_case_for_public_id, @@ -467,12 +468,26 @@ async def test_exception_1st_test_suite_error_2nd_pass( test_suite_id_2 = "TestSuiteExpected" test_case_id_2 = "TCTRExpectedPass" selected_tests = { - "tool_unit_tests": { - test_suite_id_1: {test_case_id_1: 1}, - test_suite_id_2: {test_case_id_2: 1}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": test_suite_id_1, + "test_cases": [{"public_id": test_case_id_1, "iterations": 1}], + }, + { + "public_id": test_suite_id_2, + "test_cases": [{"public_id": test_case_id_2, "iterations": 1}], + }, + ], + } + ] } - test_runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + + test_runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) # Save test_run reference to inspect models after completion test_run = test_runner.test_run assert test_run is not None diff --git a/app/tests/test_engine/test_script_manager.py b/app/tests/test_engine/test_script_manager.py index 6201264d..564e4424 100644 --- a/app/tests/test_engine/test_script_manager.py +++ b/app/tests/test_engine/test_script_manager.py @@ -15,6 +15,7 @@ # import pytest +from app.schemas.test_selection import SelectedTests from app.test_engine.test_script_manager import ( TestCaseNotFound, TestCollectionNotFound, @@ -26,85 +27,189 @@ @pytest.mark.asyncio async def test_validate_test_selection_OK() -> None: selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - "TestSuiteAsync": {"TCTRInstantPass": 1, "TCTRNeverEnding": 3}, - }, - "sample_tests": {"SampleTestSuite1": {"TCSS1001": 1}}, + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + }, + { + "public_id": "TestSuiteAsync", + "test_cases": [ + {"public_id": "TCTRInstantPass", "iterations": 1}, + {"public_id": "TCTRNeverEnding", "iterations": 1}, + ], + }, + ], + }, + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [{"public_id": "TCSS1001", "iterations": 1}], + } + ], + }, + ] } - test_script_manager.validate_test_selection(selected_tests) + test_script_manager.validate_test_selection(SelectedTests(**selected_tests)) @pytest.mark.asyncio async def test_validate_test_selection_invalid_test_collection() -> None: selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - "TestSuiteAsync": {"TCTRInstantPass": 1, "TCTRNeverEnding": 3}, - }, - # Following test collection does not exist - "invalid_name": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - }, + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + }, + { + "public_id": "TestSuiteAsync", + "test_cases": [ + {"public_id": "TCTRInstantPass", "iterations": 1}, + {"public_id": "TCTRNeverEnding", "iterations": 1}, + ], + }, + ], + }, + { + "public_id": "invalid_name", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + } + ], + }, + ] } with pytest.raises(TestCollectionNotFound): - test_script_manager.validate_test_selection(selected_tests) + 
test_script_manager.validate_test_selection(SelectedTests(**selected_tests)) @pytest.mark.asyncio async def test_validate_test_selection_invalid_test_suite() -> None: # Test non existing test selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - # Following test suite does not exist - "invalid_test_suite": {"TCTRExpectedPass": 1}, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + }, + { + "public_id": "invalid_test_suite", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + }, + ], + } + ] } + with pytest.raises(TestSuiteNotFound): - test_script_manager.validate_test_selection(selected_tests) + test_script_manager.validate_test_selection(SelectedTests(**selected_tests)) # Test existing test suite from other collection selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - "TestSuiteAsync": {"TCTRInstantPass": 1, "TCTRNeverEnding": 3}, - }, - "sample_tests": { - # Following test suite is not in this collection - "TestSuiteExpected": {"TCTRExpectedPass": 1}, - }, + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + }, + { + "public_id": "TestSuiteAsync", + "test_cases": [ + {"public_id": "TCTRInstantPass", "iterations": 1}, + {"public_id": "TCTRNeverEnding", "iterations": 3}, + ], + }, + ], + }, + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1} + ], + } + ], + }, + ] } + with pytest.raises(TestSuiteNotFound): - test_script_manager.validate_test_selection(selected_tests) + test_script_manager.validate_test_selection(SelectedTests(**selected_tests)) @pytest.mark.asyncio async def test_validate_test_selection_invalid_test_case() -> None: # Test non existing test selected_tests = { - "tool_unit_tests": { - "TestSuiteExpected": { - "TCTRExpectedPass": 1, - # Following test case does not exist - "invalid_test_case": 1, - }, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteExpected", + "test_cases": [ + {"public_id": "TCTRExpectedPass", "iterations": 1}, + # Following test case is not in this collection + {"public_id": "invalid_test_case", "iterations": 1}, + ], + } + ], + } + ] } + with pytest.raises(TestCaseNotFound): - test_script_manager.validate_test_selection(selected_tests) + test_script_manager.validate_test_selection(SelectedTests(**selected_tests)) # Test existing test case from other test suite selected_tests = { - "tool_unit_tests": { - "TestSuiteAsync": { - "TCTRInstantPass": 1, - "TCTRNeverEnding": 3, - # Following test case is not in this collection - "TCTRExpectedPass": 1, - }, - } + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": "TestSuiteAsync", + "test_cases": [ + {"public_id": "TCTRInstantPass", "iterations": 1}, + {"public_id": "TCTRNeverEnding", "iterations": 3}, + # Following test case is not in this collection + {"public_id": "TCTRExpectedPass", "iterations": 1}, + ], + } + ], + } + ] } + with pytest.raises(TestCaseNotFound): - test_script_manager.validate_test_selection(selected_tests) + test_script_manager.validate_test_selection(SelectedTests(**selected_tests)) diff --git 
a/app/tests/utils/test_run_config.py b/app/tests/utils/test_run_config.py deleted file mode 100644 index 7057e628..00000000 --- a/app/tests/utils/test_run_config.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (c) 2023 Project CHIP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, Dict, Optional - -from faker import Faker -from sqlalchemy.orm import Session - -from app import crud, models -from app.schemas.test_run_config import TestRunConfigCreate - -fake = Faker() - - -def random_test_run_config_dict( - name: Optional[str] = None, - dut_name: Optional[str] = None, - selected_tests: Optional[Dict[str, Dict[str, Dict[str, int]]]] = None, -) -> dict: - output = {} - - # Name is not optional, - if name is None: - name = fake.text(max_nb_chars=20) - output["name"] = name - - # DUT Name is not optional, - if dut_name is None: - dut_name = fake.text(max_nb_chars=20) - output["dut_name"] = dut_name - - # Selected Test Cases is not optional, - if selected_tests is None: - selected_tests = {} - - output["selected_tests"] = selected_tests - - return output - - -def create_random_test_run_config(db: Session, **kwargs: Any) -> models.TestRunConfig: - test_run_config_in = TestRunConfigCreate(**random_test_run_config_dict(**kwargs)) - return crud.test_run_config.create(db, obj_in=test_run_config_in) - - -test_run_config_base_dict = { - "name": "test_test_run_config", - "dut_name": "test_dut_test_run_config", - "selected_tests": {"SDK YAML Tests": {"FirstChipToolSuite": {"TC-ACE-1.1": 1}}}, - "created_at": "2023-06-27T14:02:56.902898", -} diff --git a/app/tests/utils/test_run_execution.py b/app/tests/utils/test_run_execution.py index 606be367..ab549009 100644 --- a/app/tests/utils/test_run_execution.py +++ b/app/tests/utils/test_run_execution.py @@ -44,7 +44,7 @@ def random_test_run_execution_dict( if state is not None: output["state"] = state - # Title is not optional, + # Title is not optional if title is None: title = fake.text(max_nb_chars=20) output["title"] = title @@ -80,7 +80,7 @@ def create_random_test_run_execution_archived( def create_random_test_run_execution( - db: Session, selected_tests: SelectedTests, **kwargs: Any + db: Session, selected_tests: SelectedTests = SelectedTests(), **kwargs: Any ) -> models.TestRunExecution: test_run_execution_dict = random_test_run_execution_dict(**kwargs) @@ -89,8 +89,10 @@ def create_random_test_run_execution( test_run_execution_dict["project_id"] = project.id test_run_execution_in = TestRunExecutionCreate(**test_run_execution_dict) - return crud.test_run_execution.create( - db=db, obj_in=test_run_execution_in, selected_tests=selected_tests + return crud.test_run_execution.create_with_selected_tests( + db=db, + obj_in=test_run_execution_in, + selected_tests=selected_tests, ) @@ -102,10 +104,23 @@ def create_random_test_run_execution_with_test_case_states( # and real test case num_test_cases = sum(test_case_states.values()) selected_tests: dict = { - "sample_tests": {"SampleTestSuite1": {"TCSS1001": num_test_cases}} + 
"collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": num_test_cases} + ], + } + ], + } + ] } + test_run_execution = create_random_test_run_execution( - db=db, selected_tests=selected_tests + db=db, selected_tests=SelectedTests(**selected_tests) ) test_suite_execution = test_run_execution.test_suite_executions[0] @@ -125,17 +140,44 @@ def create_random_test_run_execution_with_test_case_states( def create_test_run_execution_with_some_test_cases( db: Session, **kwargs: Any ) -> TestRunExecution: - return create_random_test_run_execution( - db=db, - selected_tests={ - "sample_tests": { - "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3} + selected_tests_dict = { + "collections": [ + { + "public_id": "sample_tests", + "test_suites": [ + { + "public_id": "SampleTestSuite1", + "test_cases": [ + {"public_id": "TCSS1001", "iterations": 1}, + {"public_id": "TCSS1002", "iterations": 2}, + {"public_id": "TCSS1003", "iterations": 3}, + ], + } + ], } - }, - **kwargs + ] + } + selected_tests = SelectedTests(**selected_tests_dict) + return create_random_test_run_execution( + db=db, selected_tests=selected_tests, **kwargs ) +selected_tests_base_dict = { + "collections": [ + { + "public_id": "SDK YAML Tests", + "test_suites": [ + { + "public_id": "FirstChipToolSuite", + "test_cases": [{"public_id": "TC-ACE-1.1", "iterations": 1}], + } + ], + } + ] +} + + test_run_execution_base_dict = { "title": "UI_Test_Run_2023_05_23_18_43_30", "description": "", diff --git a/app/tests/utils/test_runner.py b/app/tests/utils/test_runner.py index 9635b2be..932dd649 100644 --- a/app/tests/utils/test_runner.py +++ b/app/tests/utils/test_runner.py @@ -70,9 +70,27 @@ async def load_and_run_tool_unit_tests( iterations: int = 1, ) -> Tuple[TestRunner, TestRun, TestSuite, TestCase]: selected_tests = { - "tool_unit_tests": {suite_cls.public_id(): {case_cls.public_id(): iterations}} + "collections": [ + { + "public_id": "tool_unit_tests", + "test_suites": [ + { + "public_id": suite_cls.public_id(), + "test_cases": [ + { + "public_id": case_cls.public_id(), + "iterations": iterations, + } + ], + } + ], + } + ] } - runner = load_test_run_for_test_cases(db=db, test_cases=selected_tests) + + runner = load_test_run_for_test_cases( + db=db, test_cases=SelectedTests(**selected_tests) + ) run = runner.test_run assert run is not None