Merge pull request #18542 from jmchilton/workflow_tests
Implement Declarative Testing for Workflow Behaviors
jmchilton authored Jul 30, 2024
2 parents 2e7459c + 419f538 commit a3326a9
Showing 32 changed files with 739 additions and 347 deletions.
@@ -1,4 +1,4 @@
-name: Framework tests
+name: Tool framework tests
on:
  push:
    paths-ignore:
@@ -64,9 +64,9 @@ jobs:
        uses: actions/cache@v4
        with:
          path: 'galaxy root/.venv'
-          key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework
+          key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework-tools
      - name: Run tests
-        run: ./run_tests.sh --coverage --framework
+        run: ./run_tests.sh --coverage --framework-tools
        working-directory: 'galaxy root'
      - uses: codecov/codecov-action@v3
        with:
79 changes: 79 additions & 0 deletions .github/workflows/framework_workflows.yaml
@@ -0,0 +1,79 @@
+name: Workflow framework tests
+on:
+  push:
+    paths-ignore:
+      - 'client/**'
+      - 'doc/**'
+      - 'lib/galaxy_test/selenium/**'
+  pull_request:
+    paths-ignore:
+      - 'client/**'
+      - 'doc/**'
+      - 'lib/galaxy_test/selenium/**'
+  schedule:
+    # Run at midnight UTC every Tuesday
+    - cron: '0 0 * * 2'
+env:
+  GALAXY_TEST_DBURI: 'postgresql://postgres:postgres@localhost:5432/galaxy?client_encoding=utf8'
+  GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+jobs:
+  test:
+    name: Test
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['3.8']
+    services:
+      postgres:
+        image: postgres:13
+        env:
+          POSTGRES_USER: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_DB: postgres
+        ports:
+          - 5432:5432
+    steps:
+      - if: github.event_name == 'schedule'
+        run: |
+          echo "GALAXY_CONFIG_OVERRIDE_METADATA_STRATEGY=extended" >> $GITHUB_ENV
+          echo "GALAXY_CONFIG_OVERRIDE_OUTPUTS_TO_WORKING_DIRECTORY=true" >> $GITHUB_ENV
+      - uses: actions/checkout@v4
+        with:
+          path: 'galaxy root'
+      - uses: actions/setup-node@v4
+        with:
+          node-version: '18.12.1'
+          cache: 'yarn'
+          cache-dependency-path: 'galaxy root/client/yarn.lock'
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Get full Python version
+        id: full-python-version
+        shell: bash
+        run: echo "version=$(python -c 'import sys; print("-".join(str(v) for v in sys.version_info))')" >> $GITHUB_OUTPUT
+      - name: Cache pip dir
+        uses: actions/cache@v4
+        with:
+          path: ~/.cache/pip
+          key: pip-cache-${{ matrix.python-version }}-${{ hashFiles('galaxy root/requirements.txt') }}
+      - name: Cache galaxy venv
+        uses: actions/cache@v4
+        with:
+          path: 'galaxy root/.venv'
+          key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework-workflows
+      - name: Run tests
+        run: ./run_tests.sh --coverage --framework-workflows
+        working-directory: 'galaxy root'
+      - uses: codecov/codecov-action@v3
+        with:
+          flags: framework
+          working-directory: 'galaxy root'
+      - uses: actions/upload-artifact@v4
+        if: failure()
+        with:
+          name: Framework test results (${{ matrix.python-version }})
+          path: 'galaxy root/run_framework_workflows_tests.html'
2 changes: 2 additions & 0 deletions lib/galaxy/tool_util/parser/yaml.py
@@ -310,6 +310,8 @@ def expand_dict_form(item):
    return assert_list or None  # XML variant is None if no assertions made


+# Planemo depends on this and was never updated unfortunately.
+# https://github.com/galaxyproject/planemo/blob/master/planemo/test/_check_output.py
__to_test_assert_list = to_test_assert_list

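As a quick illustration (not part of the diff; the assertion payload below is hypothetical), downstream code such as Planemo reaches this helper through the double-underscore alias, which is why the alias must be kept:

    # Sketch only: a downstream consumer keeping the legacy alias working.
    # The example assertion payload is an assumption, not taken from this diff.
    from galaxy.tool_util.parser.yaml import __to_test_assert_list as to_test_assert_list

    assertions = [{"that": "has_text", "text": "chr1"}]
    assert_list = to_test_assert_list(assertions)  # returns None when no assertions are supplied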
58 changes: 57 additions & 1 deletion lib/galaxy/tool_util/verify/__init__.py
@@ -45,6 +45,7 @@
    DEFAULT_METRIC,
    DEFAULT_PIN_LABELS,
)
+from galaxy.tool_util.parser.yaml import to_test_assert_list
from galaxy.util import unicodify
from galaxy.util.compression_utils import get_fileobj
from .asserts import verify_assertions
@@ -56,6 +57,8 @@
log = logging.getLogger(__name__)

DEFAULT_TEST_DATA_RESOLVER = TestDataResolver()
+GetFilenameT = Optional[Callable[[str], str]]
+GetLocationT = Optional[Callable[[str], str]]


def verify(
@@ -64,7 +67,7 @@ def verify(
    attributes: Optional[Dict[str, Any]],
    filename: Optional[str] = None,
    get_filecontent: Optional[Callable[[str], bytes]] = None,
-    get_filename: Optional[Callable[[str], str]] = None,
+    get_filename: GetFilenameT = None,
    keep_outputs_dir: Optional[str] = None,
    verify_extra_files: Optional[Callable] = None,
    mode="file",
@@ -585,3 +588,56 @@ def files_image_diff(file1: str, file2: str, attributes: Optional[Dict[str, Any]
    distance_eps = attributes.get("eps", DEFAULT_EPS)
    if distance > distance_eps:
        raise AssertionError(f"Image difference {distance} exceeds eps={distance_eps}.")


+# TODO: After tool-util with this included is published, refactor planemo.test._check_output
+# to use this function. There is already a comment there about breaking fewer abstractions.
+# https://github.com/galaxyproject/planemo/blob/master/planemo/test/_check_output.py
+def verify_file_path_against_dict(
+    get_filename: GetFilenameT,
+    get_location: GetLocationT,
+    path: str,
+    output_content: bytes,
+    test_properties,
+    test_data_target_dir: Optional[str] = None,
+) -> None:
+    with open(path, "rb") as f:
+        output_content = f.read()
+    item_label = f"Output with path {path}"
+    verify_file_contents_against_dict(
+        get_filename, get_location, item_label, output_content, test_properties, test_data_target_dir
+    )
+
+
+def verify_file_contents_against_dict(
+    get_filename: GetFilenameT,
+    get_location: GetLocationT,
+    item_label: str,
+    output_content: bytes,
+    test_properties,
+    test_data_target_dir: Optional[str] = None,
+) -> None:
+    # Support Galaxy-like file location (using "file") or CWL-like ("path" or "location").
+    expected_file = test_properties.get("file", None)
+    if expected_file is None:
+        expected_file = test_properties.get("path", None)
+    if expected_file is None:
+        location = test_properties.get("location")
+        if location:
+            if location.startswith(("http://", "https://")):
+                assert get_location
+                expected_file = get_location(location)
+            else:
+                expected_file = location.split("file://", 1)[-1]
+
+    if "asserts" in test_properties:
+        test_properties["assert_list"] = to_test_assert_list(test_properties["asserts"])
+    verify(
+        item_label,
+        output_content,
+        attributes=test_properties,
+        filename=expected_file,
+        get_filename=get_filename,
+        keep_outputs_dir=test_data_target_dir,
+        verify_extra_files=None,
+    )
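A rough usage sketch of the new helpers follows (not part of the diff; the paths, resolver, and assertion payload are hypothetical). The helper resolves the expected file from "file"/"path"/"location", expands any "asserts" into an "assert_list", and delegates to verify:

    # Hypothetical caller; only the signatures above come from the diff.
    from galaxy.tool_util.verify import verify_file_path_against_dict

    test_properties = {
        "file": "expected_output.tsv",  # resolved through get_filename below
        "asserts": [{"that": "has_text", "text": "chr1"}],  # becomes assert_list
    }
    verify_file_path_against_dict(
        get_filename=lambda name: f"test-data/{name}",  # assumed simple resolver
        get_location=None,  # only consulted for http(s) "location" properties
        path="actual_output.tsv",  # file produced by the test run
        output_content=b"",  # ignored; the helper re-reads `path` itself
        test_properties=test_properties,
    )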
79 changes: 44 additions & 35 deletions lib/galaxy/tool_util/verify/interactor.py
@@ -346,18 +346,7 @@ def _verify_metadata(self, history_id, hid, attributes):
        `dbkey` and `tags` all map to the API description directly. Other metadata attributes
        are assumed to be datatype-specific and mapped with a prefix of `metadata_`.
        """
-        metadata = attributes.get("metadata", {}).copy()
-        for key in metadata.copy().keys():
-            if key not in ["name", "info", "tags", "created_from_basename"]:
-                new_key = f"metadata_{key}"
-                metadata[new_key] = metadata[key]
-                del metadata[key]
-            elif key == "info":
-                metadata["misc_info"] = metadata["info"]
-                del metadata["info"]
-        expected_file_type = attributes.get("ftype", None)
-        if expected_file_type:
-            metadata["file_ext"] = expected_file_type
+        metadata = get_metadata_to_test(attributes)

        if metadata:

@@ -370,29 +359,7 @@ def wait_for_content():
                return None

            dataset = wait_on(wait_for_content, desc="dataset metadata", timeout=10)
-
-            for key, value in metadata.items():
-                try:
-                    dataset_value = dataset.get(key, None)
-
-                    def compare(val, expected):
-                        if str(val) != str(expected):
-                            raise Exception(
-                                f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}]. Dataset API value was [{dataset}]."  # noqa: B023
-                            )
-
-                    if isinstance(dataset_value, list):
-                        value = str(value).split(",")
-                        if len(value) != len(dataset_value):
-                            raise Exception(
-                                f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}], lists differ in length. Dataset API value was [{dataset}]."
-                            )
-                        for val, expected in zip(dataset_value, value):
-                            compare(val, expected)
-                    else:
-                        compare(dataset_value, value)
-                except KeyError:
-                    raise Exception(f"Failed to verify dataset metadata, metadata key [{key}] was not found.")
+            compare_expected_metadata_to_api_response(metadata, dataset)

    def wait_for_job(self, job_id: str, history_id: Optional[str] = None, maxseconds=DEFAULT_TOOL_TEST_WAIT) -> None:
        self.wait_for(lambda: self.__job_ready(job_id, history_id), maxseconds=maxseconds)
@@ -1934,3 +1901,45 @@ def test_data_iter(required_files):
                raise Exception(f"edit_attributes type ({edit_att.get('type', None)}) is unimplemented")

        yield data_dict


+def compare_expected_metadata_to_api_response(metadata: dict, dataset: dict) -> None:
+    for key, value in metadata.items():
+        try:
+            dataset_value = dataset.get(key, None)
+
+            def compare(val, expected):
+                if str(val) != str(expected):
+                    raise Exception(
+                        f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}]. Dataset API value was [{dataset}]."  # noqa: B023
+                    )
+
+            if isinstance(dataset_value, list):
+                value = str(value).split(",")
+                if len(value) != len(dataset_value):
+                    raise Exception(
+                        f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}], lists differ in length. Dataset API value was [{dataset}]."
+                    )
+                for val, expected in zip(dataset_value, value):
+                    compare(val, expected)
+            else:
+                compare(dataset_value, value)
+        except KeyError:
+            raise Exception(f"Failed to verify dataset metadata, metadata key [{key}] was not found.")
+
+
+def get_metadata_to_test(test_properties: dict) -> dict:
+    """Fetch out metadata to test from test property dict and adapt it to keys the API produces."""
+    metadata = test_properties.get("metadata", {}).copy()
+    for key in metadata.copy().keys():
+        if key not in ["name", "info", "tags", "created_from_basename"]:
+            new_key = f"metadata_{key}"
+            metadata[new_key] = metadata[key]
+            del metadata[key]
+        elif key == "info":
+            metadata["misc_info"] = metadata["info"]
+            del metadata["info"]
+    expected_file_type = test_properties.get("ftype", None)
+    if expected_file_type:
+        metadata["file_ext"] = expected_file_type
+    return metadata