diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml deleted file mode 100644 index 078e7d45..00000000 --- a/.github/workflows/CI.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: CI - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - # Trigger on pull requests. - pull_request: - - # Trigger on pushes to the mainline branches. This prevents building commits twice when the pull - # request source branch is in the same repository. - push: - branches: - - "main" - - # Trigger on request. - workflow_dispatch: - -jobs: - flow-status: - name: flow-status - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.7 - - name: Set up Python - uses: actions/setup-python@v5.1.1 - with: - python-version: '3.11' - - uses: actions/cache@v4.0.2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('.github/workflows/requirements-test.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - - name: Install signac - run: python3 -m pip install -r .github/workflows/requirements-test.txt - - name: Initialize workspace - run: python3 hoomd_validation/init.py - - name: Check flow status - run: python3 hoomd_validation/project.py status diff --git a/.github/workflows/requirements-test.in b/.github/workflows/requirements-test.in new file mode 100644 index 00000000..98eb30c7 --- /dev/null +++ b/.github/workflows/requirements-test.in @@ -0,0 +1,6 @@ +h5py +matplotlib +numpy +rtoml +scipy +signac diff --git a/.github/workflows/requirements-test.txt b/.github/workflows/requirements-test.txt index bc6f9784..389526ee 100644 --- a/.github/workflows/requirements-test.txt +++ b/.github/workflows/requirements-test.txt @@ -1,6 +1,45 @@ -h5py==3.10.0 -gsd==3.2.1 -numpy==1.26.4 -PyYAML==6.0.1 +# This file was autogenerated by uv via the following command: +# uv pip compile requirements-test.in +contourpy==1.2.1 + # via matplotlib +cycler==0.12.1 + # via matplotlib +filelock==3.15.4 + # via signac +fonttools==4.53.1 + # via matplotlib +h5py==3.11.0 + # via -r requirements-test.in +kiwisolver==1.4.5 + # via matplotlib +matplotlib==3.9.2 + # via -r requirements-test.in +numpy==2.1.0 + # via + # -r requirements-test.in + # contourpy + # h5py + # matplotlib + # scipy +packaging==24.1 + # via + # matplotlib + # signac +pillow==10.4.0 + # via matplotlib +pyparsing==3.1.2 + # via matplotlib +python-dateutil==2.9.0.post0 + # via matplotlib +rtoml==0.11.0 + # via -r requirements-test.in +scipy==1.14.1 + # via -r requirements-test.in signac==2.2.0 -signac-flow==0.28.0 + # via -r requirements-test.in +six==1.16.0 + # via python-dateutil +synced-collections==1.0.0 + # via signac +tqdm==4.66.5 + # via signac diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file mode 100644 index 00000000..696e5440 --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,11 @@ +name: Close stale issues and PRs + +on: + schedule: + - cron: '0 19 * * *' + + workflow_dispatch: + +jobs: + stale: + uses: glotzerlab/workflows/.github/workflows/stale.yaml@ea2e25d07af862a1c696a932c2bd6b242d142049 # 0.2.0 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index a356f559..00000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Close stale issues and PRs - -on: - schedule: - - cron: '0 19 * * *' - - # Trigger on request. 
- workflow_dispatch: - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9.0.0 - with: - days-before-close: 10 - stale-issue-label: stale - stale-pr-label: stale - exempt-issue-labels: essential - exempt-pr-labels: essential - - days-before-issue-stale: 170 - stale-issue-message: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. - close-issue-message: > - This issue has been automatically closed because it has not had - recent activity. - - days-before-pr-stale: 20 - stale-pr-message: > - This pull request has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. - close-pr-message: > - This pull request has been automatically closed because it has not had - recent activity. diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 00000000..71727c7e --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,38 @@ +name: Unit test + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + + push: + branches: + - "main" + + workflow_dispatch: + +jobs: + status: + name: Initialize and show status + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: Set up Python + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + with: + python-version: "3.12" + - name: Set up Python environment + uses: glotzerlab/workflows/setup-uv@ae7e7c6931098a313ef8069ef04b88a55c3a40f6 # 0.3.0 + with: + lockfile: ".github/workflows/requirements-test.txt" + - name: Initialize workspace + run: python3 hoomd_validation/project.py init + - name: Show workflow + run: cat --number workflow.toml + - name: Set up row + uses: glotzerlab/workflows/setup-row@ae7e7c6931098a313ef8069ef04b88a55c3a40f6 # 0.3.0 + - name: Show project status + run: row show status diff --git a/.gitignore b/.gitignore index dff1e436..8e3abc2c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,10 @@ -workspace -templates +__pycache__ +.row +.signac_sp_cache.json.gz +.signac *.out *.svg hoomd_validation/__pycache__ -hoomd_validation/config.json -signac.rc signac_project_document.json -.signac_sp_cache.json.gz -__pycache__ -.signac -.bundles -*.code-workspace +workflow.toml +workspace diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 5d82aad5..fdf115f6 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -6,32 +6,44 @@ The code in this repository is designed to run validation tests using HOOMD-blue for longer periods of time than CI testing can handle. Users of this repository should be able to: -1. Run a set of validation test workflows on a variety of hardware setups. +1. Run a set of validation test workflows on the machine of their choice (workstation + and/or HPC). -2. Choose specific validation workflows to run, and be able to select subsets of -the operations in one workflow to run. +2. Choose specific validation workflows to run and be able to select subsets of + the actions in one workflow to run. -3. Visualize the validation test output and analysis using signac-dashboard. +3. Visualize the test output and analysis using signac-dashboard. ## Implementation To minimize the amount of effort needed to execute all test workflows (1), -Each validation test workflow is defined as a "subproject" of a single signac-flow -project. 
All operations on a subproject are prefixed with the subprojet's name
-to allow for regex selection of operations at the command line (2). All operations
-in a subproject use a precondition or `select` argument to limit their operations
-only to the signac jobs specific to that subproject.
-
-To further facilitate (2), all subprojects that require it will have an operation
-`_create_initial_state` as the first step in the workflow to prepare the
-initial conditions used for later steps. All subprojects will also suffix operation
+each validation test workflow is defined as a "subproject" of a single row
+project. All actions on a subproject are prefixed with the subproject's name
+to allow for glob selection of actions at the command line (2). All actions
+in a subproject limit their actions to the signac jobs specific to that
+subproject.
+
+To further facilitate (2), all subprojects that require it will have an action
+`.create_initial_state` as the first step in the workflow to prepare the
+initial conditions used for later steps. All subprojects will also suffix action
names with `_cpu` or `_gpu` according to the HOOMD device they execute on.
Each subproject is defined in its own module file (e.g. `lj_fluid.py`). Each module
-must have a function `job_statepoints` that generates the statepoints needed for the job.
-Each statepoint must have a key `"subproject"` with its name matching the subproject.
-The subproject module file also includes all the flow operations for that subproject.
+must have a function `job_statepoints` that generates the state points needed for the
+job. Every state point must have a key `"subproject"` with its name matching the
+subproject. The subproject module file implements all the actions.
To add a subproject, implement its module, then:
1. Import the subproject module in `project.py`.
2. Import the subproject module in `init.py` and add it to the list of subprojects.
+
+## Configuration
+
+`hoomd-validation` allows user configuration of many parameters (such as walltime,
+cores per job, etc.). Therefore, the row `workflow.toml` file must be dynamically
+generated, which is facilitated by the module `workflow.py`. Each subproject file (e.g.
+`lj_fluid.py`) adds actions to the global list of actions in `action.py` with the
+computed parameters based on the configuration file. The list of actions is used in
+two ways. First, `init.py` will write out the `workflow.toml` that corresponds to
+the current configuration. Second, `project.py` will dispatch actions to the methods
+registered in `action.py`.
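+
+As an illustration, the sketch below shows the registration pattern the subproject
+modules follow. The module contents and configuration values here are simplified
+placeholders, not a working subproject; see `alj_2d.py` or `hard_disk.py` for
+complete definitions.
+
+```python
+# Illustrative sketch only -- placeholder values, not a real subproject.
+from config import CONFIG
+from workflow import Action
+from workflow_class import ValidationWorkflow
+
+
+def create_initial_state(*jobs):
+    """Create the initial configuration for each signac job in the group."""
+
+
+ValidationWorkflow.add_action(
+    # Prefixing the action name with the module name enables glob selection.
+    f'{__name__}.create_initial_state',
+    Action(
+        method=create_initial_state,
+        configuration={
+            'products': ['initial_state.gsd'],
+            'group': {'include': [{'condition': ['/subproject', '==', __name__]}]},
+            'resources': {'walltime': {'per_submission': CONFIG['short_walltime']}},
+        },
+    ),
+)
+```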
diff --git a/README.md b/README.md
index bb5adaaa..b40b65ee 100644
--- a/README.md
+++ b/README.md
@@ -1,91 +1,66 @@
# HOOMD-blue Validation
-This repository contains longer running validation tests for HOOMD-blue. The
-validation test workflows in this repository are organized into signac projects.
-
-## Requirements
-
-* gsd >= 2.8.0
-* numpy
-* PyYAML
-* signac >= 2.2.0
-* signac-flow >= 0.25.1
-* signac-dashboard [optional]
-* Simulation workflow steps require either the [glotzerlab-software container]
- or the following software:
- * HOOMD-blue >=3.0 *(with MPI support enabled, GPU and LLVM support are optional)*,
-* Analysis workflow steps require either the [glotzerlab-software container] or
- the following software:
- * matplotlib
- * numpy
- * scipy
-* Workstation or HPC system with at least 16 CPU cores and 1 GPU supported by
- HOOMD-blue.
+This repository contains validation tests for HOOMD-blue. The workflows are organized in
+a [signac] workspace and use [row].
## Preparation
Clone this repository:
+```bash
+git clone https://github.com/glotzerlab/hoomd-validation.git
+```
+
+Then change to the repository's directory:
```bash
-$ git clone https://github.com/glotzerlab/hoomd-validation.git
-$ cd hoomd-validation
+cd hoomd-validation
```
## Configuration
-Install the prerequisites into a Python environment of your choice. To use the
-[glotzerlab-software container], copy `hoomd_validation/config-sample.yaml` to
-`hoomd_validation/config.yaml`, uncomment the executable mapping, and set
-`singularity_container` to your container image's path.
+1. Install the requirements (see below) into a Python environment of your choice.
+2. Copy `hoomd_validation/config-sample.toml` to `hoomd_validation/config.toml`
+ and set the parameters as desired. Each option is documented by a comment in the
+ sample configuration file.
+3. Initialize the signac project directories and create `workflow.toml`.
+ ```bash
+ python3 hoomd_validation/project.py init
+ ```
+4. Configure [row] as necessary for your workstation or HPC resources.
+ > Note: `project.py init` will overwrite `workflow.toml`.
-`hoomd_validation/config.yaml` also controls a number of job submission
-parameters. See the commented options in `hoomd_validation/config-sample.yaml`
-for a list and their default values.
+[row]: https://row.readthedocs.io
-## Usage
+## Execute tests
-1. Initialize the signac project directories, populate them with jobs and job
-documents:
- ```bash
- python3 hoomd_validation/init.py
- ```
-2. Run and analyze all validation tests:
- * On a workstation (this takes a long time to complete):
- ```
- $ python hoomd_validation/project.py run
- ```
- * On a cluster:
- 1. Populate the flow script template or your shell environment appropriately.
- ```
- $ flow template create
- $ vim templates/script.sh # make changes to e.g. load modules
- ```
- 2. Create the simulation initial states:
- ```
- $ python hoomd_validation/project.py submit -o '.*create_initial_state'
- ```
- *(wait for all jobs to complete)*
- 3. Run the simulations (adjust partition names according to your cluster)
- ```
- $ python3 hoomd_validation/project.py submit -o '.*_cpu' --partition standard
- $ python3 hoomd_validation/project.py submit -o '.*_gpu' --partition gpu
- ```
- *(wait for all jobs to complete)*
- 4. Run the analysis (assuming you have the analysis workflow prerequisites in your Python environment):
- ```
- $ python hoomd_validation/project.py run
- ```
- *(alternately, submit the analysis in stages until no jobs remain eligible)*
-3. Inspect the plots produced in:
- * `workspace/*.svg`
-
-## Dashboard
-
-Run the provided [signac-dashboard] application to explore the results in a web browser:
+Run
+```bash
+row submit
+```
+
+to submit the first stage of the workflow. Wait for all the jobs to complete, then run
+`row submit` again to start the second stage. Most subprojects in the validation
+workflow have 4 stages ending with `compare_modes`.
+
+> Note: You can execute a single subproject with `row submit --action 'subproject_name.*'`.
+
+After you execute `compare_modes`, inspect the `svg` files saved in the repository root.
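+
+For example, to run a single subproject stage by stage (using `hard_disk` purely as an
+illustration), you might use:
+
+```bash
+# Submit the currently eligible actions of the hard_disk subproject.
+row submit --action 'hard_disk.*'
+# After those jobs finish, review the status and submit the next stage.
+row show status
+row submit --action 'hard_disk.*'
+```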
+You can also run the provided [signac-dashboard] application to explore the results in a +web browser: ```bash -$ python3 dashboard.py run +python3 dashboard.py run ``` -[glotzerlab-software container]: https://glotzerlab-software.readthedocs.io/ -[signac-dashboard]: https://docs.signac.io/projects/dashboard/ +[signac]: https://signac.readthedocs.io +[signac-dashboard]: https://signac-dashboard.readthedocs.io + +## Requirements + +* h5py +* hoomd >= 4.6.0 +* matplotlib +* numpy +* rtoml +* scipy +* signac >= 2.2.0 +* signac-dashboard [optional] diff --git a/documentation/delta.md b/documentation/delta.md index 32131abf..94ab6b0e 100644 --- a/documentation/delta.md +++ b/documentation/delta.md @@ -3,31 +3,8 @@ # Recommended configuration ``` -max_cores_sim: 64 -max_cores_submission: 512 -max_gpus_submission: 8 -max_walltime: 48 -``` - -# Compiling HOOMD from source - -* When building with `ENABLE_LLVM=on`, build separate CPU and GPU builds in: - * `/scratch/bbgw/${USER}/build/hoomd-cpu` - * and `/scratch/bbgw/${USER}/build/hoomd-gpu`. -* To link to `libcuda.so`, compile `hoomd-gpu` in an interactive job: - `srun --account=bbgw-delta-gpu --partition=gpuA40x4 --nodes=1 --tasks=1 --tasks-per-node=1 --cpus-per-task=16 --mem=48g --gpus=1 --pty zsh` - -* Submitting jobs - -Unset your accounts in `signac.rc` and use environment variables to choose the account and -hoomd build at submission time: - -CPU: -``` -SBATCH_ACCOUNT=bbgw-delta-cpu PYTHONPATH=/scratch/bbgw/${USER}/build/hoomd-cpu SBATCH_EXPORT=PYTHONPATH python hoomd_validation/project.py submit -o '.*_cpu' -``` - -GPU: -``` -SBATCH_ACCOUNT=bbgw-delta-gpu PYTHONPATH=/scratch/bbgw/${USER}/build/hoomd-gpu SBATCH_EXPORT=PYTHONPATH python hoomd_validation/project.py submit -o '.*_gpu' --partition gpuA100x4 +max_cores_sim = 64 +max_cores_submission = 512 +max_gpus_submission = 8 +max_walltime = "2 days, 00:00:00" ``` diff --git a/documentation/frontier.md b/documentation/frontier.md index c7a51992..7acb7b78 100644 --- a/documentation/frontier.md +++ b/documentation/frontier.md @@ -3,36 +3,8 @@ # Recommended configuration ``` -max_cores_sim: 56 -max_cores_submission: 7168 -max_gpus_submission: 256 -max_walltime: 2 -enable_llvm: false -enable_gpu: true -``` - -## Recommended template - -``` -{% extends "frontier.sh" %} - -{% block header %} - {{- super () -}} -#SBATCH -C nvme -{% endblock header %} -{% block custom_content %} - -echo "Loading software environment." - -export GLOTZERLAB_SOFTWARE_ROOT=/mnt/bb/${USER}/software -time srun --ntasks-per-node 1 mkdir ${GLOTZERLAB_SOFTWARE_ROOT} -time srun --ntasks-per-node 1 tar --directory ${GLOTZERLAB_SOFTWARE_ROOT} -xpf ${MEMBERWORK}/mat110/software.tar -source ${GLOTZERLAB_SOFTWARE_ROOT}/variables.sh - -{% endblock custom_content %} -{% block body %} - {{- super () -}} - -echo "Completed job in $SECONDS seconds" -{% endblock body %} +max_cores_sim = 56 +max_cores_submission = 7168 +max_gpus_submission = 256 +max_walltime = "02:00:00" ``` diff --git a/documentation/greatlakes.md b/documentation/greatlakes.md index 699a0c03..702b5fc0 100644 --- a/documentation/greatlakes.md +++ b/documentation/greatlakes.md @@ -3,30 +3,8 @@ # Recommended configuration ``` -max_cores_sim: 32 -max_cores_submission: 32 -max_gpus_submission: 1 -max_walltime: 96 -``` - -# Compiling HOOMD from source - -* When building with `ENABLE_LLVM=on`, built separate CPU and GPU builds in: - * `${HOME}/build/hoomd-cpu` - * and `${HOME}/build/hoomd-gpu`. 
-* To link to `libcuda.so`, compile `hoomd-gpu` in an interactive job: - `srun -Asglotzer --gres=gpu:1 --nodes=1 --ntasks-per-node=1 --cpus-per-task=8 --partition gpu -t 8:00:00 --mem=64G --pty /bin/zsh` - -* Submitting jobs - -Set environment variables to choose the hoomd build and memory requirement at submission time: - -CPU: -``` -SBATCH_MEM_PER_CPU="4g" PYTHONPATH=${HOME}/build/hoomd-cpu SBATCH_EXPORT=PYTHONPATH python3 hoomd_validation/project.py submit -o '.*_cpu' -``` - -GPU: -``` -SBATCH_MEM_PER_CPU="64g" PYTHONPATH=${HOME}/build/hoomd-gpu SBATCH_EXPORT=PYTHONPATH python hoomd_validation/project.py submit -o '.*_gpu' --partition gpu +max_cores_sim = 32 +max_cores_submission = 32 +max_gpus_submission = 1 +max_walltime = "4 days, 00:00:00" ``` diff --git a/documentation/summit.md b/documentation/summit.md deleted file mode 100644 index ed307a45..00000000 --- a/documentation/summit.md +++ /dev/null @@ -1,35 +0,0 @@ -# Tips for running on OLCF Summit - -# Recommended configuration - -``` -max_cores_sim: 42 -max_cores_submission: 1344 -max_gpus_submission: 192 -max_walltime: 2 -``` - -## Recommended template - -* Write stdout/stderr to files. -* Unload `darshan-runtime` to prevent jobs from hanging on exit. - -``` -{% extends "summit.sh" %} - -{% block header %} - {{- super () -}} -#BSUB -o hoomd-validation.%J.out -#BSUB -e hoomd-validation.%J.out - -{% endblock header %} -{% block custom_content %} -echo "Loading modules." -source /ccs/proj/mat110/glotzerlab-software/joaander-test/environment.sh -module unload darshan-runtime -set -x -{% endblock custom_content %} -{% block body %} - {{- super () -}} -{% endblock body %} -``` diff --git a/hoomd_validation/.gitignore b/hoomd_validation/.gitignore index 5b6b0720..5b6c0960 100644 --- a/hoomd_validation/.gitignore +++ b/hoomd_validation/.gitignore @@ -1 +1 @@ -config.yaml +config.toml diff --git a/hoomd_validation/alj_2d.py b/hoomd_validation/alj_2d.py index 93097cd8..50d9073c 100644 --- a/hoomd_validation/alj_2d.py +++ b/hoomd_validation/alj_2d.py @@ -3,13 +3,23 @@ """ALJ 2D energy conservation validation test.""" +import itertools +import math import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. 
@@ -36,7 +46,9 @@ NUM_REPLICATES = min(4, CONFIG['replicates']) NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -57,45 +69,37 @@ def job_statepoints(): ) -def is_alj_2d(job): - """Test if a given job is part of the alj_2d subproject.""" - return job.cached_statepoint['subproject'] == 'alj_2d' - +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} -partition_jobs_cpu = aggregator.groupsof( - num=min(NUM_REPLICATES, CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_alj_2d, -) +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} -partition_jobs_gpu = aggregator.groupsof( - num=min(NUM_REPLICATES, CONFIG['max_gpus_submission']), - sort_by='density', - select=is_alj_2d, -) - -@Project.post.isfile('alj_2d_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=CONFIG['short_walltime'], - ), - aggregator=partition_jobs_cpu, -) -def alj_2d_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting alj2_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) init_diameter = CIRCUMCIRCLE_RADIUS * 2 * 1.15 @@ -143,11 +147,26 @@ def alj_2d_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('alj_2d_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed alj_2d_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_md_simulation( @@ -172,9 +191,6 @@ def make_md_simulation( period_multiplier (int): Factor to multiply the GSD file periods by. 
""" - import hoomd - from hoomd import md - incircle_d = INCIRCLE_RADIUS * 2 circumcircle_d = CIRCUMCIRCLE_RADIUS * 2 r_cut = max( @@ -182,8 +198,8 @@ def make_md_simulation( ) # pair force - nlist = md.nlist.Cell(buffer=0.4) - alj = md.pair.aniso.ALJ(default_r_cut=r_cut, nlist=nlist) + nlist = hoomd.md.nlist.Cell(buffer=0.4) + alj = hoomd.md.pair.aniso.ALJ(default_r_cut=r_cut, nlist=nlist) alj.shape['A'] = {'vertices': PARTICLE_VERTICES, 'faces': [], 'rounding_radii': 0} alj.params[('A', 'A')] = { 'epsilon': ALJ_PARAMS['epsilon'], @@ -193,12 +209,12 @@ def make_md_simulation( } # integrator - integrator = md.Integrator( + integrator = hoomd.md.Integrator( dt=0.0001, methods=[method], forces=[alj], integrate_rotational_dof=True ) # compute thermo - thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All()) # add gsd log quantities logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) @@ -238,16 +254,18 @@ def make_md_simulation( return sim -def run_nve_md_sim(job, device, complete_filename): +def run_nve_md_sim(job, device): """Run the MD simulation in NVE.""" - import hoomd - sim_mode = 'nve_md' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) else: - initial_state = job.fn('alj_2d_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) @@ -268,7 +286,7 @@ def run_nve_md_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -281,8 +299,6 @@ def run_nve_md_sim(job, device, complete_filename): nve_md_job_definitions = [ { 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu, }, ] @@ -291,44 +307,25 @@ def run_nve_md_sim(job, device, complete_filename): [ { 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, ] ) -def add_nve_md_job(device_name, ranks_per_partition, aggregator): +def add_nve_md_job(device_name): """Add a MD NVE conservation job to the workflow.""" sim_mode = 'nve_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(alj_2d_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'alj_2d_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def alj_2d_nve_md_job(*jobs): + def nve_action(*jobs): """Run NVE MD.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting alj_2d_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -341,43 +338,40 @@ def alj_2d_nve_md_job(*jobs): job, 
f'{sim_mode}_{device_name}.log' ), ) - run_nve_md_sim( - job, device, complete_filename=f'{sim_mode}_{device_name}_complete' - ) + run_nve_md_sim(job, device) if communicator.rank == 0: - print(f'completed alj_2d_{sim_mode}_{device_name}: {job}') - - nve_md_sampling_jobs.append(alj_2d_nve_md_job) + print(f'completed {action_name}: {job}') + + nve_md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=nve_action, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in nve_md_job_definitions: add_nve_md_job(**definition) -analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles'], sort_by='replicate_idx', select=is_alj_2d -) - -@Project.pre.after(*nve_md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='alj_2d_conservation_analysis_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def alj_2d_conservation_analyze(*jobs): +def conservation_analyze(*jobs): """Analyze the output of NVE simulations and inspect conservation.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - matplotlib.style.use('fivethirtyeight') - print('starting alj_2d_conservation_analyze:', jobs[0]) + print(f'starting {__name__}.conservation_analyze:', jobs[0]) sim_modes = ['nve_md_cpu'] if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): @@ -456,5 +450,18 @@ def plot(*, ax, data, quantity_name, legend=False): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['alj_2d_conservation_analysis_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.conservation_analyze', + Action( + method=conservation_analyze, + configuration={ + 'previous_actions': nve_md_sampling_jobs, + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/config-sample.toml b/hoomd_validation/config-sample.toml new file mode 100644 index 00000000..0b39036f --- /dev/null +++ b/hoomd_validation/config-sample.toml @@ -0,0 +1,23 @@ +## Number of replicate simulations to average over for ensemble comparisons. +# replicates = 32 + +## Maximum CPU cores to use in a single simulation. +# max_cores_sim = 16 + +## Maximum CPU cores to use in a single aggregate job submission. +# max_cores_submission = 16 + +## Maximum GPUs to use in a single aggregate job submission. +# max_gpus_submission = 1 + +## Maximum wall time (in hours) that a submitted cluster job is allowed to run. +# max_walltime = "1 day, 00:00:00" + +## Wall time (in hours) To use for short, non-restartable jobs +# short_walltime = "02:00:00" + +## Whether the HOOMD build has ENABLE_LLVM on. +# enable_llvm = true + +## Whether the HOOMD build has ENABLE_GPU on. +# enable_gpu = true diff --git a/hoomd_validation/config-sample.yaml b/hoomd_validation/config-sample.yaml deleted file mode 100644 index 2f2875b3..00000000 --- a/hoomd_validation/config-sample.yaml +++ /dev/null @@ -1,35 +0,0 @@ -## Define the executable to use. 
-## * Use `sys.executable` by default. -## * Use `python_exec` when set. -## * When `singularity_container` is set, use: -## `singularity exec --nv {singularity_options} {singularity_container} python` -## * When `singularity_container` and `python_exec` are set, use: -## `singularity exec --nv {singularity_options} {singularity_container} {python_exec}` -# executable: - # python_exec: "python" - # singularity_container: "container.sif" - # singularity_options: "" - -## Number of replicate simulations to average over for ensemble comparisons. -# replicates: 32 - -## Maximum CPU cores to use in a single simulation. -# max_cores_sim: 16 - -## Maximum CPU cores to use in a single aggregate job submission. -# max_cores_submission: 16 - -## Maximum GPUs to use in a single aggregate job submission. -# max_gpus_submission: 1 - -## Maximum wall time (in hours) that a submitted cluster job is allowed to run. -# max_walltime: 24 - -## Wall time (in hours) To use for short, non-restartable jobs -# short_walltime: 2 - -## Whether the HOOMD build has ENABLE_LLVM on. -# enable_llvm: true - -## Whether the HOOMD build has ENABLE_GPU on. -# enable_gpu: true diff --git a/hoomd_validation/config_parser.py b/hoomd_validation/config_parser.py index c67e8c2e..2c0bb218 100644 --- a/hoomd_validation/config_parser.py +++ b/hoomd_validation/config_parser.py @@ -4,10 +4,9 @@ """Class for parsing config files.""" import os -import sys from pathlib import Path -import yaml +import rtoml class ConfigFile(dict): @@ -20,48 +19,19 @@ class ConfigFile(dict): instance. """ - DEFAULT_CONFIG_PATH = str(Path(__file__).parent / 'config.yaml') + DEFAULT_CONFIG_PATH = str(Path(__file__).parent / 'config.toml') def __init__(self, config_file_path=DEFAULT_CONFIG_PATH): if not os.path.exists(config_file_path): config = dict() else: - with open(config_file_path) as file: - config = yaml.safe_load(file) + with open(config_file_path, encoding='utf-8') as file: + config = rtoml.load(file) - self['executable'] = self._parse_executable_string(config) self['max_cores_sim'] = int(config.get('max_cores_sim', 16)) self['max_cores_submission'] = int(config.get('max_cores_submission', 16)) self['max_gpus_submission'] = int(config.get('max_gpus_submission', 1)) - self['max_walltime'] = float(config.get('max_walltime', 24)) - self['short_walltime'] = float(config.get('short_walltime', 2)) + self['max_walltime'] = str(config.get('max_walltime', '1 day, 00:00:00')) + self['short_walltime'] = str(config.get('short_walltime', '02:00:00')) self['replicates'] = int(config.get('replicates', 32)) - self['enable_llvm'] = bool(config.get('enable_llvm', True)) self['enable_gpu'] = bool(config.get('enable_gpu', True)) - - @staticmethod - def _parse_executable_string(config_file): - """Search the config file and determine the executable. - - Searches the executable section of the config file and builds the string - needed by flow's directives. If no config file is present, we use the - python executable used to run this code. 
- """ - if 'executable' not in config_file: - return sys.executable - - return_string = '' - executable_options = config_file['executable'] - using_container = 'singularity_container' in executable_options - if using_container: - return_string += ( - 'singularity exec --nv ' - + executable_options.get('singularity_options', '') - + ' ' - ) - return_string += executable_options['singularity_container'] + ' ' - - return_string += executable_options.get( - 'python_exec', 'python' if using_container else sys.executable - ) - return return_string diff --git a/hoomd_validation/custom_actions.py b/hoomd_validation/custom_actions.py index 7d5d982e..37f38f3d 100644 --- a/hoomd_validation/custom_actions.py +++ b/hoomd_validation/custom_actions.py @@ -3,30 +3,39 @@ """This file contains all custom actions needed for this project.""" -import hoomd +try: + import hoomd + class ComputeDensity(hoomd.custom.Action): + """Compute the density of particles in the system. -class ComputeDensity(hoomd.custom.Action): - """Compute the density of particles in the system. + The density computed is a number density. - The density computed is a number density. + Args: + N: When not None, Use N instead of the number of particles when + computing the density. + """ - Args: - N: When not None, Use N instead of the number of particles when - computing the density. - """ + def __init__(self, N=None): + self.N = N - def __init__(self, N=None): - self.N = N + @hoomd.logging.log + def density(self): + """float: The density of the system.""" + if self.N is None: + return self._state.N_particles / self._state.box.volume - @hoomd.logging.log - def density(self): - """float: The density of the system.""" - if self.N is None: - return self._state.N_particles / self._state.box.volume + return self.N / self._state.box.volume - return self.N / self._state.box.volume + def act(self, timestep): + """Dummy act method.""" + pass +except ModuleNotFoundError as e: + print(f'Warning: {e}') + + # This workaround is to allow `python project.py init` to succeed in CI checks + # without requiring a working HOOMD installation. + class ComputeDensity: + """Placeholder class.""" - def act(self, timestep): - """Dummy act method.""" pass diff --git a/hoomd_validation/hard_disk.py b/hoomd_validation/hard_disk.py index 47e977e5..3fcaa8fe 100644 --- a/hoomd_validation/hard_disk.py +++ b/hoomd_validation/hard_disk.py @@ -3,14 +3,24 @@ """Hard disk equation of state validation test.""" +import itertools import json import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. 
@@ -24,7 +34,9 @@ LOG_PERIOD = {'trajectory': 50_000, 'quantities': 100} NUM_CPU_RANKS = min(64, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -47,51 +59,46 @@ def job_statepoints(): ) -def is_hard_disk(job): - """Test if a given job is part of the hard_disk subproject.""" - return job.cached_statepoint['subproject'] == 'hard_disk' - - -partition_jobs_cpu_serial = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission']), - sort_by='density', - select=is_hard_disk, -) - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_hard_disk, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_hard_disk, -) - - -@Project.post.isfile('hard_disk_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi, -) -def hard_disk_create_initial_state(*jobs): +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_serial = _resources | {'processes': {'per_directory': 1}} +_group_serial = _group | { + 'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission']) +} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +_group_compare = _group | { + 'sort_by': ['/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + + +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting hard_disk_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -136,11 +143,26 @@ def hard_disk_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('hard_disk_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed hard_disk_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def 
make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): @@ -160,9 +182,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. Each tuple is a pair of the instance and the loggable quantity name. """ - import hoomd - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -221,17 +240,19 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - import hoomd - sim_mode = 'nvt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('hard_disk_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) @@ -278,7 +299,7 @@ def run_nvt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -287,18 +308,20 @@ def run_nvt_sim(job, device, complete_filename): ) -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - # device sim_mode = 'npt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('hard_disk_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates @@ -387,7 +410,7 @@ def run_npt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -396,18 +419,19 @@ def run_npt_sim(job, device, complete_filename): ) -def run_nec_sim(job, device, complete_filename): +def run_nec_sim(job, device): """Run MC sim in NVT with NEC.""" - import hoomd - from custom_actions import ComputeDensity - sim_mode = 'nec' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('hard_disk_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False mc = hoomd.hpmc.nec.integrate.Sphere( @@ -518,7 +542,7 @@ def run_nec_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -532,20 +556,20 @@ def run_nec_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, 
+ 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'nec', 'device_name': 'cpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_cpu_serial, + 'resources': _resources_serial, + 'group': _group_serial, }, ] @@ -555,42 +579,26 @@ def run_nec_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, + 'resources': _resources_gpu, + 'group': _group_gpu, }, ] ) -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) + action_name = f'{__name__}.{mode}_{device_name}' - if device_name == 'gpu': - directives['ngpu'] = directives['nranks'] - - @Project.pre.after(hard_disk_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'hard_disk_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting hard_disk_{mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -604,128 +612,133 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: - print(f'completed hard_disk_{mode}_{device_name}: {job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_hard_disk) -@Project.pre.after(*sampling_jobs) -@Project.post.true('hard_disk_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def hard_disk_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting hard_disk_analyze:', job) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - sim_modes = [ - 'nvt_cpu', - 'nec_cpu', - 'npt_cpu', - ] + sim_modes = [ + 'nvt_cpu', + 'nec_cpu', + 'npt_cpu', + ] - if os.path.exists(job.fn('nvt_gpu_quantities.h5')): - sim_modes.extend(['nvt_gpu']) + if os.path.exists(job.fn('nvt_gpu_quantities.h5')): + sim_modes.extend(['nvt_gpu']) - 
util._sort_sim_modes(sim_modes) + util._sort_sim_modes(sim_modes) + + timesteps = {} + pressures = {} + densities = {} - timesteps = {} - pressures = {} - densities = {} + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + if 'nec' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + ] + else: + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - if 'nec' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), + # Plot results + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, ) + ax.legend() - # Plot results - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 1, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') - - job.document['hard_disk_analysis_complete'] = True - + ax = fig.add_subplot(2, 1, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='hard_disk_analysis_complete')) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='hard_disk_compare_modes_complete') -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=['density', 'num_particles'], sort_by='replicate_idx', select=is_hard_disk + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': 
{'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def hard_disk_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy + +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting hard_disk_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [ 'nvt_cpu', @@ -770,7 +783,7 @@ def hard_disk_compare_modes(*jobs): avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - avg_quantity, stderr_quantity = util.plot_vs_expected( + util.plot_vs_expected( ax=ax, values=quantities, ylabel=labels[quantity_name], @@ -782,5 +795,18 @@ def hard_disk_compare_modes(*jobs): filename = f'hard_disk_compare_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['hard_disk_compare_modes_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/hard_sphere.py b/hoomd_validation/hard_sphere.py index 4428641f..4d418928 100644 --- a/hoomd_validation/hard_sphere.py +++ b/hoomd_validation/hard_sphere.py @@ -3,13 +3,23 @@ """Hard sphere equation of state validation test.""" +import itertools import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. 
@@ -42,51 +52,40 @@ def job_statepoints(): ) -def is_hard_sphere(job): - """Test if a given job is part of the hard_sphere subproject.""" - return job.cached_statepoint['subproject'] == 'hard_sphere' - - -partition_jobs_cpu_serial = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission']), - sort_by='density', - select=is_hard_sphere, -) - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_hard_sphere, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_hard_sphere, -) +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_serial = _resources | {'processes': {'per_directory': 1}} +_group_serial = _group | { + 'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission']) +} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} +_group_compare = _group | { + 'sort_by': ['/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} -@Project.post.isfile('hard_sphere_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi, -) -def hard_sphere_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting hard_sphere_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -131,11 +130,26 @@ def hard_sphere_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('hard_sphere_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed hard_sphere_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): @@ -155,9 +169,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. Each tuple is a pair of the instance and the loggable quantity name. 
""" - import hoomd - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -216,11 +227,14 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - initial_state = job.fn('hard_sphere_initial_state.gsd') sim_mode = 'nvt' + if util.is_simulation_complete(job, device, sim_mode): + return + + initial_state = job.fn('initial_state.gsd') sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) # equilibrate @@ -241,17 +255,18 @@ def run_nvt_sim(job, device, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - # device - initial_state = job.fn('hard_sphere_initial_state.gsd') sim_mode = 'npt' + if util.is_simulation_complete(job, device, sim_mode): + return + + initial_state = job.fn('initial_state.gsd') # box updates boxmc = hoomd.hpmc.update.BoxMC( betaP=job.cached_statepoint['pressure'], trigger=hoomd.trigger.Periodic(1) @@ -301,17 +316,18 @@ def run_npt_sim(job, device, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) -def run_nec_sim(job, device, complete_filename): +def run_nec_sim(job, device): """Run MC sim in NVT with NEC.""" - import hoomd - from custom_actions import ComputeDensity - - initial_state = job.fn('hard_sphere_initial_state.gsd') sim_mode = 'nec' + if util.is_simulation_complete(job, device, sim_mode): + return + + initial_state = job.fn('initial_state.gsd') + mc = hoomd.hpmc.nec.integrate.Sphere( default_d=0.05, update_fraction=0.01, nselect=1 ) @@ -389,7 +405,7 @@ def run_nec_sim(job, device, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) sampling_jobs = [] @@ -397,65 +413,50 @@ def run_nec_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'nec', 'device_name': 'cpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_cpu_serial, + 'resources': _resources_serial, + 'group': _group_serial, }, ] + if CONFIG['enable_gpu']: job_definitions.extend( [ { 'mode': 'nvt', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, + 'resources': _resources_gpu, + 'group': _group_gpu, }, ] ) -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, group, resources): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = directives['nranks'] + action_name = f'{__name__}.{mode}_{device_name}' - @Project.pre.after(hard_sphere_create_initial_state) - 
@Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'hard_sphere_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting hard_sphere_{mode}_{device_name}', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -469,126 +470,131 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: - print(f'completed hard_sphere_{mode}_{device_name}: {job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_hard_sphere) -@Project.pre.after(*sampling_jobs) -@Project.post.true('hard_sphere_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def hard_sphere_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting hard_sphere_analyze:', job) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - sim_modes = [ - 'nvt_cpu', - 'nec_cpu', - 'npt_cpu', - ] + sim_modes = [ + 'nvt_cpu', + 'nec_cpu', + 'npt_cpu', + ] - if os.path.exists(job.fn('nvt_gpu_quantities.h5')): - sim_modes.extend(['nvt_gpu']) + if os.path.exists(job.fn('nvt_gpu_quantities.h5')): + sim_modes.extend(['nvt_gpu']) - util._sort_sim_modes(sim_modes) + util._sort_sim_modes(sim_modes) + + timesteps = {} + pressures = {} + densities = {} - timesteps = {} - pressures = {} - densities = {} + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + if 'nec' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + ] + else: + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - if 'nec' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + # save averages + for mode in sim_modes: + job.document[mode] = dict( + 
pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, ) + ax.legend() - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 1, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') - - job.document['hard_sphere_analysis_complete'] = True - + ax = fig.add_subplot(2, 1, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='hard_sphere_analysis_complete')) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='hard_sphere_compare_modes_complete') -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=['density', 'num_particles'], sort_by='replicate_idx', select=is_hard_sphere + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def hard_sphere_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy + +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting hard_sphere_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [ 'nvt_cpu', @@ -633,7 +639,7 @@ def hard_sphere_compare_modes(*jobs): avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - avg_quantity, stderr_quantity = util.plot_vs_expected( + util.plot_vs_expected( ax=ax, values=quantities, ylabel=labels[quantity_name], @@ -645,5 +651,18 @@ def hard_sphere_compare_modes(*jobs): filename = f'hard_sphere_compare_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['hard_sphere_compare_modes_complete'] = True + +ValidationWorkflow.add_action( + 
f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/init.py b/hoomd_validation/init.py deleted file mode 100644 index 5d13109e..00000000 --- a/hoomd_validation/init.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2022-2024 The Regents of the University of Michigan. -# Part of HOOMD-blue, released under the BSD 3-Clause License. - -"""Populate the signac project with jobs and job document parameters.""" - -# import subprojects -import alj_2d -import config -import hard_disk -import hard_sphere -import lj_fluid -import lj_union -import patchy_particle_pressure -import signac -import simple_polygon - -subprojects = [ - alj_2d, - lj_fluid, - lj_union, - hard_disk, - hard_sphere, - simple_polygon, - patchy_particle_pressure, -] - -project = signac.init_project(path=config.project_root) - -# initialize jobs for validation test projects -for subproject in subprojects: - # add all the jobs to the project - for job_sp in subproject.job_statepoints(): - job = project.open_job(job_sp).init() diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 09836810..933cc372 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -4,15 +4,26 @@ """Lennard Jones phase behavior validation test.""" import collections +import itertools import json import math import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy +import scipy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -27,7 +38,9 @@ LJ_PARAMS = {'epsilon': 1.0, 'sigma': 1.0} NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 # Limit the number of long NVE runs to reduce the number of CPU hours needed. 
NUM_NVE_RUNS = 2 @@ -59,7 +72,7 @@ def job_statepoints(): for idx in replicate_indices: yield ( { - 'subproject': 'lj_fluid', + 'subproject': __name__, 'kT': param['kT'], 'density': param['density'], 'pressure': param['pressure'], @@ -71,50 +84,49 @@ def job_statepoints(): ) -def is_lj_fluid(job): - """Test if a given job is part of the lj_fluid subproject.""" - return job.cached_statepoint['subproject'] == 'lj_fluid' - - -def sort_key(job): - """Aggregator sort key.""" - return (job.cached_statepoint['density'], job.cached_statepoint['num_particles']) - - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by=sort_key, - select=is_lj_fluid, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by=sort_key, - select=is_lj_fluid, -) +_group = { + 'sort_by': ['/density', '/num_particles'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +_include_nve = { + 'include': [ + { + 'all': [ + ['/subproject', '==', __name__], + ['/replicate_idx', '<', NUM_NVE_RUNS], + ] + } + ] +} +_group_nve_cpu = _group_cpu | _include_nve +_group_nve_gpu = _group_gpu | _include_nve -@Project.post.isfile('lj_fluid_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=CONFIG['short_walltime'], - ), - aggregator=partition_jobs_cpu_mpi, -) -def lj_fluid_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting lj_fluid_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) sp = job.sp device = hoomd.device.CPU( @@ -158,12 +170,26 @@ def lj_fluid_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('lj_fluid_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed lj_fluid_create_initial_state: {job}') - + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) ################################# # MD ensemble simulations @@ -200,14 +226,11 @@ def make_md_simulation( period_multiplier (int): Factor to multiply the GSD file periods by. 
""" - import hoomd - from hoomd import md - # pair force if extra_loggables is None: extra_loggables = [] - nlist = md.nlist.Cell(buffer=0.4) - lj = md.pair.LJ( + nlist = hoomd.md.nlist.Cell(buffer=0.4) + lj = hoomd.md.pair.LJ( default_r_cut=job.cached_statepoint['r_cut'], default_r_on=job.cached_statepoint['r_on'], nlist=nlist, @@ -216,10 +239,10 @@ def make_md_simulation( lj.mode = 'xplor' # integrator - integrator = md.Integrator(dt=0.001, methods=[method], forces=[lj]) + integrator = hoomd.md.Integrator(dt=0.001, methods=[method], forces=[lj]) # compute thermo - thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All()) # add gsd log quantities logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) @@ -259,27 +282,23 @@ def make_md_simulation( return sim -def run_md_sim(job, device, ensemble, thermostat, complete_filename): +def run_md_sim(job, device, ensemble, thermostat): """Run the MD simulation with the given ensemble and thermostat.""" - import hoomd - from custom_actions import ComputeDensity - from hoomd import md - - initial_state = job.fn('lj_fluid_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') if ensemble == 'nvt': if thermostat == 'langevin': - method = md.methods.Langevin( + method = hoomd.md.methods.Langevin( hoomd.filter.All(), kT=job.cached_statepoint['kT'] ) method.gamma.default = 1.0 elif thermostat == 'mttk': - method = md.methods.ConstantVolume(filter=hoomd.filter.All()) + method = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) method.thermostat = hoomd.md.methods.thermostats.MTTK( kT=job.cached_statepoint['kT'], tau=0.25 ) elif thermostat == 'bussi': - method = md.methods.ConstantVolume(filter=hoomd.filter.All()) + method = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) method.thermostat = hoomd.md.methods.thermostats.Bussi( kT=job.cached_statepoint['kT'] ) @@ -287,7 +306,7 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): raise ValueError(f'Unsupported thermostat {thermostat}') elif ensemble == 'npt': p = job.cached_statepoint['pressure'] - method = md.methods.ConstantPressure( + method = hoomd.md.methods.ConstantPressure( hoomd.filter.All(), S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' ) if thermostat == 'bussi': @@ -299,6 +318,9 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): sim_mode = f'{ensemble}_{thermostat}_md' + if util.is_simulation_complete(job, device, sim_mode): + return + density_compute = ComputeDensity() sim = make_md_simulation( job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] @@ -311,7 +333,9 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): # thermalize the thermostat (if applicable) if ( - isinstance(method, (md.methods.ConstantPressure, md.methods.ConstantVolume)) + isinstance( + method, (hoomd.md.methods.ConstantPressure, hoomd.md.methods.ConstantVolume) + ) ) and hasattr(method.thermostat, 'thermalize_dof'): sim.run(0) method.thermostat.thermalize_dof() @@ -325,7 +349,8 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): device.notice('Running...') sim.run(RUN_STEPS) - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) + device.notice('Done.') @@ -335,29 +360,21 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): 'ensemble': 'nvt', 'thermostat': 'langevin', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 
'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'nvt', 'thermostat': 'mttk', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'nvt', 'thermostat': 'bussi', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'npt', 'thermostat': 'bussi', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, ] @@ -368,67 +385,40 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): 'ensemble': 'nvt', 'thermostat': 'langevin', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'nvt', 'thermostat': 'mttk', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'nvt', 'thermostat': 'bussi', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'npt', 'thermostat': 'bussi', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, ] ) -def add_md_sampling_job( - ensemble, thermostat, device_name, ranks_per_partition, aggregator -): +def add_md_sampling_job(ensemble, thermostat, device_name): """Add a MD sampling job to the workflow.""" sim_mode = f'{ensemble}_{thermostat}_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_fluid_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_fluid_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def md_sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -447,25 +437,41 @@ def md_sampling_operation(*jobs): device, ensemble, thermostat, - complete_filename=f'{sim_mode}_{device_name}_complete', ) if communicator.rank == 0: - print(f'completed lj_fluid_{sim_mode}_{device_name}: {job}') - - md_sampling_jobs.append(md_sampling_operation) + print(f'completed {action_name}: {job}') + + md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=md_sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in md_job_definitions: add_md_sampling_job(**definition) -################################# -# MC simulations -################################# +# ################################# +# # MC simulations +# ################################# def make_mc_simulation(job, device, initial_state, 
sim_mode, extra_loggables=None): - """Make an MC Simulation. + """Make a MC Simulation. Args: job (`signac.job.Job`): Signac job object. @@ -476,16 +482,11 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non extra_loggables (list): List of extra loggables to log to gsd files. Patch energies are logged by default. """ - import hoomd - import numpy - from custom_actions import ComputeDensity - from hoomd import hpmc - if extra_loggables is None: extra_loggables = [] # integrator - mc = hpmc.integrate.Sphere(nselect=1) + mc = hoomd.hpmc.integrate.Sphere(nselect=1) mc.shape['A'] = dict(diameter=0.0) # pair potential @@ -555,7 +556,7 @@ def _compute_virial_pressure(): logger[('custom', 'virial_pressure')] = (_compute_virial_pressure, 'scalar') # move size tuner - mstuner = hpmc.tune.MoveSize.scale_solver( + mstuner = hoomd.hpmc.tune.MoveSize.scale_solver( moves=['d'], target=0.2, max_translation_move=0.5, @@ -572,18 +573,20 @@ def _compute_virial_pressure(): return sim -def run_nvt_mc_sim(job, device, complete_filename): +def run_nvt_mc_sim(job, device): """Run MC sim in NVT.""" - import hoomd - # simulation sim_mode = 'nvt_mc' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('lj_fluid_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode) @@ -629,7 +632,7 @@ def run_nvt_mc_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -638,23 +641,23 @@ def run_nvt_mc_sim(job, device, complete_filename): ) -def run_npt_mc_sim(job, device, complete_filename): +def run_npt_mc_sim(job, device): """Run MC sim in NPT.""" - import hoomd - from hoomd import hpmc - - # device sim_mode = 'npt_mc' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('lj_fluid_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates - boxmc = hpmc.update.BoxMC( + boxmc = hoomd.hpmc.update.BoxMC( betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], trigger=hoomd.trigger.Periodic(1), ) @@ -667,7 +670,7 @@ def run_npt_mc_sim(job, device, complete_filename): sim.operations.add(boxmc) - boxmc_tuner = hpmc.tune.BoxMCMoveSize.scale_solver( + boxmc_tuner = hoomd.hpmc.tune.BoxMCMoveSize.scale_solver( trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(400), @@ -734,7 +737,7 @@ def run_npt_mc_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -748,47 +751,27 @@ def run_npt_mc_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'mode': 
'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, ] -def add_mc_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_mc_sampling_job(mode, device_name): """Add a MC sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) + action_name = f'{__name__}.{mode}_mc_{device_name}' - @Project.pre.after(lj_fluid_create_initial_state) - @Project.post.isfile(f'{mode}_mc_{device_name}_complete') - @Project.operation( - name=f'lj_fluid_{mode}_mc_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_fluid_{mode}_mc_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -802,187 +785,188 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_mc_sim')( - job, device, complete_filename=f'{mode}_mc_{device_name}_complete' - ) + globals().get(f'run_{mode}_mc_sim')(job, device) if communicator.rank == 0: - print(f'completed lj_fluid_{mode}_mc_{device_name}: {job}') - - mc_sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + mc_sampling_jobs.append(action_name) + + sim_mode = mode + '_mc' + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in mc_job_definitions: add_mc_sampling_job(**definition) -@Project.pre(is_lj_fluid) -@Project.pre.after(*md_sampling_jobs) -@Project.pre.after(*mc_sampling_jobs) -@Project.post.true('lj_fluid_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def lj_fluid_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_fluid_analyze:', job) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - sim_modes = [ - 'nvt_langevin_md_cpu', - 'nvt_mttk_md_cpu', - 'nvt_bussi_md_cpu', - 'npt_bussi_md_cpu', - ] + sim_modes = [ + 'nvt_langevin_md_cpu', + 'nvt_mttk_md_cpu', + 'nvt_bussi_md_cpu', + 'npt_bussi_md_cpu', + ] - if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): - sim_modes.extend( - [ - 'nvt_langevin_md_gpu', - 'nvt_mttk_md_gpu', - 'nvt_bussi_md_gpu', - 'npt_bussi_md_gpu', - ] - ) + if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): + sim_modes.extend( + [ + 'nvt_langevin_md_gpu', + 'nvt_mttk_md_gpu', + 'nvt_bussi_md_gpu', + 'npt_bussi_md_gpu', + ] + ) - if 
os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): - sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): + sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) - util._sort_sim_modes(sim_modes) + util._sort_sim_modes(sim_modes) - timesteps = {} - energies = {} - pressures = {} - densities = {} - linear_momentum = {} + timesteps = {} + energies = {} + pressures = {} + densities = {} + linear_momentum = {} - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - if 'md' in sim_mode: - energies[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' - ] - else: - energies[sim_mode] = ( - log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] - * job.cached_statepoint['kT'] - ) + if 'md' in sim_mode: + energies[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' + ] + else: + energies[sim_mode] = ( + log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] + * job.cached_statepoint['kT'] + ) - energies[sim_mode] /= job.cached_statepoint['num_particles'] + energies[sim_mode] /= job.cached_statepoint['num_particles'] + + if 'md' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' + ] + else: + pressures[sim_mode] = log_traj['hoomd-data/custom/virial_pressure'] - if 'md' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - pressures[sim_mode] = log_traj['hoomd-data/custom/virial_pressure'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + if 'md' in sim_mode and 'langevin' not in sim_mode: + momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] + linear_momentum[sim_mode] = [ + math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) + for v in momentum_vector + ] + else: + linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) + + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + potential_energy=float(numpy.mean(energies[mode])), + density=float(numpy.mean(densities[mode])), + ) - if 'md' in sim_mode and 'langevin' not in sim_mode: - momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] - linear_momentum[sim_mode] = [ - math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector - ] - else: - linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) - - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - potential_energy=float(numpy.mean(energies[mode])), - density=float(numpy.mean(densities[mode])), + # Plot results + fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') + ax = fig.add_subplot(2, 2, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, ) + ax.legend() - # Plot results - fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') - ax = fig.add_subplot(2, 2, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - 
ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() + ax = fig.add_subplot(2, 2, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) - ax = fig.add_subplot(2, 2, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) + ax = fig.add_subplot(2, 2, 3) + util.plot_timeseries( + ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 + ) - ax = fig.add_subplot(2, 2, 3) - util.plot_timeseries( - ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 - ) + ax = fig.add_subplot(2, 2, 4) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data={ + mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] + for mode, lm in linear_momentum.items() + }, + ylabel=r'$|\vec{p}| / N$', + max_points=500, + ) - ax = fig.add_subplot(2, 2, 4) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data={ - mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] - for mode, lm in linear_momentum.items() + fig.suptitle( + f'$kT={job.cached_statepoint["kT"]}$, ' + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': md_sampling_jobs + mc_sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, }, - ylabel=r'$|\vec{p}| / N$', - max_points=500, - ) - - fig.suptitle( - f'$kT={job.cached_statepoint["kT"]}$, ' - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') - - job.document['lj_fluid_analysis_complete'] = True - - -analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles', 'r_cut'], - sort_by='replicate_idx', - select=is_lj_fluid, + ), ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_fluid_analysis_complete')) -@Project.post(lambda *jobs: util.true_all(*jobs, key='lj_fluid_compare_modes_complete')) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def lj_fluid_compare_modes(*jobs): +def compare_modes(*jobs): """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_fluid_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [ 'nvt_langevin_md_cpu', @@ -1092,26 +1076,25 @@ def lj_fluid_compare_modes(*jobs): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_fluid_compare_modes_complete'] = True - -@Project.pre.after(*md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, 
key='lj_fluid_distribution_analyze_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), ) -def lj_fluid_distribution_analyze(*jobs): - """Checks that MD follows the correct KE distribution.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - import scipy + +def distribution_analyze(*jobs): + """Checks that MD follows the correct KE distribution.""" matplotlib.style.use('fivethirtyeight') print('starting lj_fluid_distribution_analyze:', jobs[0]) @@ -1164,7 +1147,6 @@ def lj_fluid_distribution_analyze(*jobs): else: n_dof = num_particles * 3 - 3 - print('Reading' + job.fn(sim_mode + '_quantities.h5')) log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) if 'md' in sim_mode: @@ -1259,27 +1241,40 @@ def lj_fluid_distribution_analyze(*jobs): ) fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_fluid_distribution_analyze_complete'] = True +ValidationWorkflow.add_action( + f'{__name__}.distribution_analyze', + Action( + method=distribution_analyze, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) -################################# -# MD conservation simulations -################################# +# ################################# +# # MD conservation simulations +# ################################# -def run_nve_md_sim(job, device, run_length, complete_filename): +def run_nve_md_sim(job, device, run_length): """Run the MD simulation in NVE.""" - import hoomd - sim_mode = 'nve_md' + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') is_restarting = job.isfile(restart_filename) if is_restarting: initial_state = job.fn(restart_filename) else: - initial_state = job.fn('lj_fluid_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) @@ -1303,7 +1298,7 @@ def run_nve_md_sim(job, device, run_length, complete_filename): ) if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -1314,32 +1309,10 @@ def run_nve_md_sim(job, device, run_length, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') -def is_lj_fluid_nve(job): - """Test if a given job should be run for NVE conservation.""" - return ( - job.cached_statepoint['subproject'] == 'lj_fluid' - and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS - ) - - -partition_jobs_cpu_mpi_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by=sort_key, - select=is_lj_fluid_nve, -) - -partition_jobs_gpu_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by=sort_key, - select=is_lj_fluid_nve, -) - 
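# A minimal, self-contained sketch (illustrative literal values only, not the
# module-level dictionaries defined above) of how the row-style group
# dictionaries compose: Python's dict union keeps the right-hand value when a
# key collides, so `_group_cpu | _include_nve` swaps the plain subproject
# filter for the stricter NVE filter, while the 'all' clause inside
# `_include_nve` still carries the subproject condition, so the selection
# remains restricted to this subproject's directories.
def _sketch_group_union():
    base = {
        'sort_by': ['/density'],
        'include': [{'condition': ['/subproject', '==', 'lj_fluid']}],
    }
    nve = {
        'include': [
            {'all': [['/subproject', '==', 'lj_fluid'], ['/replicate_idx', '<', 2]]}
        ]
    }
    merged = base | nve
    # 'sort_by' survives from the left operand; 'include' is taken from the right.
    assert merged['sort_by'] == ['/density']
    assert merged['include'] == nve['include']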
nve_md_sampling_jobs = [] nve_md_job_definitions = [ { 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_nve, 'run_length': 10_000_000, }, ] @@ -1349,45 +1322,26 @@ def is_lj_fluid_nve(job): [ { 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu_nve, 'run_length': 100_000_000, }, ] ) -def add_nve_md_job(device_name, ranks_per_partition, aggregator, run_length): +def add_nve_md_job(device_name, run_length): """Add a MD NVE conservation job to the workflow.""" sim_mode = 'nve_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_fluid_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_fluid_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def lj_fluid_nve_md_job(*jobs): + def nve_action(*jobs): """Run NVE MD.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -1404,45 +1358,40 @@ def lj_fluid_nve_md_job(*jobs): job, device, run_length=run_length, - complete_filename=f'{sim_mode}_{device_name}_complete', ) if communicator.rank == 0: - print(f'completed lj_fluid_{sim_mode}_{device_name} {job}') - - nve_md_sampling_jobs.append(lj_fluid_nve_md_job) + print(f'completed {action_name}: {job}') + + nve_md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=nve_action, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_nve_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in nve_md_job_definitions: add_nve_md_job(**definition) -nve_analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles', 'r_cut'], - sort_by='replicate_idx', - select=is_lj_fluid_nve, -) - -@Project.pre.after(*nve_md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_fluid_conservation_analysis_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=nve_analysis_aggregator, -) -def lj_fluid_conservation_analyze(*jobs): +def conservation_analyze(*jobs): """Analyze the output of NVE simulations and inspect conservation.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_fluid_conservation_analyze:', jobs[0]) + print(f'starting {__name__}.conservation_analyze:', jobs[0]) sim_modes = ['nve_md_cpu'] if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): @@ -1523,5 +1472,18 @@ def plot(*, ax, data, quantity_name, legend=False): 
fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_fluid_conservation_analysis_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.conservation_analyze', + Action( + method=conservation_analyze, + configuration={ + 'previous_actions': nve_md_sampling_jobs, + 'group': _group_compare | _include_nve, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index a9878f91..921eacc7 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -4,15 +4,26 @@ """Lennard Jones phase behavior validation test (union particles).""" import collections +import itertools import json import math import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy +import scipy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -37,7 +48,9 @@ (0.5, 0.5, 0.5), ] -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 # Limit the number of long NVE runs to reduce the number of CPU hours needed. NUM_NVE_RUNS = 2 @@ -62,47 +75,51 @@ def job_statepoints(): ) -def is_lj_union(job): - """Test if a given job is part of the lj_union subproject.""" - return job.cached_statepoint['subproject'] == 'lj_union' - - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_lj_union, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_lj_union, -) +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +_include_nve = { + 'include': [ + { + 'all': [ + ['/subproject', '==', __name__], + ['/replicate_idx', '<', NUM_NVE_RUNS], + ] + } + ] +} +_group_nve_cpu = _group_cpu | _include_nve +_group_nve_gpu = _group_gpu | _include_nve -@Project.post.isfile('lj_union_initial_state_md.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=CONFIG['short_walltime'], - ), - aggregator=partition_jobs_cpu_mpi, -) -def lj_union_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - min_spacing = math.sqrt(3) 
+ 1 communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd') and job.isfile('initial_state_md.gsd'): + return + if communicator.rank == 0: - print('starting lj_union_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) sp = job.sp device = hoomd.device.CPU( @@ -148,7 +165,7 @@ def lj_union_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('lj_union_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) # Create rigid bodies for MD @@ -163,12 +180,26 @@ def lj_union_create_initial_state(*jobs): rigid.create_bodies(sim.state) hoomd.write.GSD.write( - state=sim.state, filename=job.fn('lj_union_initial_state_md.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state_md.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed lj_union_create_initial_state: {job}') - + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd', 'initial_state_md.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) ################################# # MD ensemble simulations @@ -205,15 +236,12 @@ def make_md_simulation( period_multiplier (int): Factor to multiply the GSD file periods by. """ - import hoomd - from hoomd import md - if extra_loggables is None: extra_loggables = [] # pair force - nlist = md.nlist.Cell(buffer=0.4, exclusions=('body',)) - lj = md.pair.LJ( + nlist = hoomd.md.nlist.Cell(buffer=0.4, exclusions=('body',)) + lj = hoomd.md.pair.LJ( default_r_cut=LJ_PARAMS['r_cut'], default_r_on=LJ_PARAMS['r_on'], nlist=nlist ) lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) @@ -227,7 +255,7 @@ def make_md_simulation( lj.mode = 'xplor' # integrator - integrator = md.Integrator( + integrator = hoomd.md.Integrator( dt=0.0005, methods=[method], forces=[lj], integrate_rotational_dof=True ) @@ -241,7 +269,7 @@ def make_md_simulation( integrator.rigid = rigid # compute thermo - thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All()) # add gsd log quantities logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) @@ -283,29 +311,25 @@ def make_md_simulation( return sim -def run_md_sim(job, device, ensemble, thermostat, complete_filename): +def run_md_sim(job, device, ensemble, thermostat): """Run the MD simulation with the given ensemble and thermostat.""" - import hoomd - from custom_actions import ComputeDensity - from hoomd import md - - initial_state = job.fn('lj_union_initial_state_md.gsd') + initial_state = job.fn('initial_state_md.gsd') integrate_filter = hoomd.filter.Rigid(flags=('center',)) if ensemble == 'nvt': if thermostat == 'langevin': - method = md.methods.Langevin( + method = hoomd.md.methods.Langevin( filter=integrate_filter, kT=job.cached_statepoint['kT'] ) method.gamma.default = 1.0 elif thermostat == 'mttk': - method = md.methods.ConstantVolume(filter=integrate_filter) + method = hoomd.md.methods.ConstantVolume(filter=integrate_filter) method.thermostat = hoomd.md.methods.thermostats.MTTK( kT=job.cached_statepoint['kT'], tau=0.25 ) elif thermostat == 
'bussi': - method = md.methods.ConstantVolume(filter=integrate_filter) + method = hoomd.md.methods.ConstantVolume(filter=integrate_filter) method.thermostat = hoomd.md.methods.thermostats.Bussi( kT=job.cached_statepoint['kT'] ) @@ -313,7 +337,7 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): raise ValueError(f'Unsupported thermostat {thermostat}') elif ensemble == 'npt': p = job.cached_statepoint['pressure'] - method = md.methods.ConstantPressure( + method = hoomd.md.methods.ConstantPressure( integrate_filter, S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' ) if thermostat == 'bussi': @@ -325,6 +349,9 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): sim_mode = f'{ensemble}_{thermostat}_md' + if util.is_simulation_complete(job, device, sim_mode): + return + density_compute = ComputeDensity(job.cached_statepoint['num_particles']) sim = make_md_simulation( job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] @@ -337,7 +364,9 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): # thermalize the thermostat (if applicable) if ( - isinstance(method, (md.methods.ConstantPressure, md.methods.ConstantVolume)) + isinstance( + method, (hoomd.md.methods.ConstantPressure, hoomd.md.methods.ConstantVolume) + ) ) and hasattr(method.thermostat, 'thermalize_dof'): sim.run(0) method.thermostat.thermalize_dof() @@ -352,7 +381,7 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) md_sampling_jobs = [] @@ -361,29 +390,21 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): 'ensemble': 'nvt', 'thermostat': 'langevin', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'nvt', 'thermostat': 'mttk', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'nvt', 'thermostat': 'bussi', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'npt', 'thermostat': 'bussi', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, ] @@ -394,67 +415,40 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): 'ensemble': 'nvt', 'thermostat': 'langevin', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'nvt', 'thermostat': 'mttk', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'nvt', 'thermostat': 'bussi', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'npt', 'thermostat': 'bussi', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, ] ) -def add_md_sampling_job( - ensemble, thermostat, device_name, ranks_per_partition, aggregator -): +def add_md_sampling_job(ensemble, thermostat, device_name): """Add a MD sampling job to the workflow.""" sim_mode = f'{ensemble}_{thermostat}_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - 
@Project.pre.after(lj_union_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_union_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def md_sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_union_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -473,13 +467,29 @@ def md_sampling_operation(*jobs): device, ensemble, thermostat, - complete_filename=f'{sim_mode}_{device_name}_complete', ) if communicator.rank == 0: - print(f'completed lj_union_{sim_mode}_{device_name}: {job}') - - md_sampling_jobs.append(md_sampling_operation) + print(f'completed {action_name}: {job}') + + md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=md_sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in md_job_definitions: @@ -502,20 +512,16 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non extra_loggables (list): List of extra loggables to log to gsd files. Patch energies are logged by default. 
""" - import hoomd - from custom_actions import ComputeDensity - from hoomd import hpmc - if extra_loggables is None: extra_loggables = [] # integrator - mc = hpmc.integrate.Sphere(nselect=1) + mc = hoomd.hpmc.integrate.Sphere(nselect=1) mc.shape['A'] = dict(diameter=0.0) mc.shape['R'] = dict(diameter=0.0, orientable=True) # pair potential - lennard_jones = hpmc.pair.LennardJones() + lennard_jones = hoomd.hpmc.pair.LennardJones() lennard_jones.params[('A', 'A')] = dict( epsilon=LJ_PARAMS['epsilon'] / job.cached_statepoint['kT'], sigma=LJ_PARAMS['sigma'], @@ -527,7 +533,7 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non lennard_jones.mode = 'xplor' - lj_union = hpmc.pair.Union(constituent_potential=lennard_jones) + lj_union = hoomd.hpmc.pair.Union(constituent_potential=lennard_jones) lj_union.body['A'] = dict(positions=[], types=[]) lj_union.body['R'] = dict(positions=CUBE_VERTS, types=['A'] * len(CUBE_VERTS)) @@ -565,7 +571,7 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non compute_density.attach(sim) # move size tuner - mstuner = hpmc.tune.MoveSize.scale_solver( + mstuner = hoomd.hpmc.tune.MoveSize.scale_solver( moves=['a', 'd'], types=['R'], target=0.2, @@ -583,19 +589,21 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_mc_sim(job, device, complete_filename): +def run_nvt_mc_sim(job, device): """Run MC sim in NVT.""" - import hoomd - # simulation sim_mode = 'nvt_mc' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('lj_union_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode) @@ -656,7 +664,7 @@ def run_nvt_mc_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -665,23 +673,23 @@ def run_nvt_mc_sim(job, device, complete_filename): ) -def run_npt_mc_sim(job, device, complete_filename): +def run_npt_mc_sim(job, device): """Run MC sim in NPT.""" - import hoomd - from hoomd import hpmc - sim_mode = 'npt_mc' + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('lj_union_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates - boxmc = hpmc.update.BoxMC( + boxmc = hoomd.hpmc.update.BoxMC( betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], trigger=hoomd.trigger.Periodic(1), ) @@ -694,7 +702,7 @@ def run_npt_mc_sim(job, device, complete_filename): sim.operations.add(boxmc) - boxmc_tuner = hpmc.tune.BoxMCMoveSize.scale_solver( + boxmc_tuner = hoomd.hpmc.tune.BoxMCMoveSize.scale_solver( trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(400), @@ -771,7 +779,7 @@ def run_npt_mc_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - 
pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -785,47 +793,27 @@ def run_npt_mc_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, ] -def add_mc_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_mc_sampling_job(mode, device_name): """Add a MC sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) + action_name = f'{__name__}.{mode}_mc_{device_name}' - @Project.pre.after(lj_union_create_initial_state) - @Project.post.isfile(f'{mode}_mc_{device_name}_complete') - @Project.operation( - name=f'lj_union_{mode}_mc_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_union_{mode}_mc_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -841,183 +829,186 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_mc_sim')( - job, device, complete_filename=f'{mode}_mc_{device_name}_complete' - ) + globals().get(f'run_{mode}_mc_sim')(job, device) if communicator.rank == 0: - print(f'completed lj_union_{mode}_mc_{device_name} {job}') - - mc_sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + mc_sampling_jobs.append(action_name) + + sim_mode = mode + '_mc' + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in mc_job_definitions: add_mc_sampling_job(**definition) -@Project.pre(is_lj_union) -@Project.pre.after(*md_sampling_jobs) -@Project.pre.after(*mc_sampling_jobs) -@Project.post.true('lj_union_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def lj_union_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_union_analyze:', job) - - sim_modes = [ - 'nvt_langevin_md_cpu', - 'nvt_mttk_md_cpu', - 'nvt_bussi_md_cpu', - 'npt_bussi_md_cpu', - ] + for job in jobs: + print(f'starting {__name__}.analyze:', job) - if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): - sim_modes.extend( - [ - 'nvt_langevin_md_gpu', - 'nvt_mttk_md_gpu', - 
'nvt_bussi_md_gpu', - 'npt_bussi_md_gpu', - ] - ) + sim_modes = [ + 'nvt_langevin_md_cpu', + 'nvt_mttk_md_cpu', + 'nvt_bussi_md_cpu', + 'npt_bussi_md_cpu', + ] - if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): - sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): + sim_modes.extend( + [ + 'nvt_langevin_md_gpu', + 'nvt_mttk_md_gpu', + 'nvt_bussi_md_gpu', + 'npt_bussi_md_gpu', + ] + ) - util._sort_sim_modes(sim_modes) + if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): + sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) - timesteps = {} - energies = {} - pressures = {} - densities = {} - linear_momentum = {} + util._sort_sim_modes(sim_modes) - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + timesteps = {} + energies = {} + pressures = {} + densities = {} + linear_momentum = {} - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - if 'md' in sim_mode: - energies[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' - ] - else: - energies[sim_mode] = ( - log_traj['hoomd-data/hpmc/pair/Union/energy'] - * job.cached_statepoint['kT'] - ) + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - energies[sim_mode] /= job.cached_statepoint['num_particles'] + if 'md' in sim_mode: + energies[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' + ] + else: + energies[sim_mode] = ( + log_traj['hoomd-data/hpmc/pair/Union/energy'] + * job.cached_statepoint['kT'] + ) - if 'md' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' - ] - else: - pressures[sim_mode] = numpy.full(len(energies[sim_mode]), numpy.nan) + energies[sim_mode] /= job.cached_statepoint['num_particles'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + if 'md' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' + ] + else: + pressures[sim_mode] = numpy.full(len(energies[sim_mode]), numpy.nan) - if 'md' in sim_mode and 'langevin' not in sim_mode: - momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] - linear_momentum[sim_mode] = [ - math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) - - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - potential_energy=float(numpy.mean(energies[mode])), - density=float(numpy.mean(densities[mode])), - ) - - fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') - ax = fig.add_subplot(2, 2, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 2, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - ax = fig.add_subplot(2, 2, 3) - util.plot_timeseries( - ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 - ) + if 'md' in sim_mode and 'langevin' not in sim_mode: + momentum_vector = 
log_traj['hoomd-data/md/Integrator/linear_momentum'] + linear_momentum[sim_mode] = [ + math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) + for v in momentum_vector + ] + else: + linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) + + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + potential_energy=float(numpy.mean(energies[mode])), + density=float(numpy.mean(densities[mode])), + ) - ax = fig.add_subplot(2, 2, 4) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data={ - mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] - for mode, lm in linear_momentum.items() - }, - ylabel=r'$|\vec{p}| / N$', - max_points=500, - ) + fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') + ax = fig.add_subplot(2, 2, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() - fig.suptitle( - f'$kT={job.cached_statepoint["kT"]}$, ' - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + ax = fig.add_subplot(2, 2, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) - job.document['lj_union_analysis_complete'] = True + ax = fig.add_subplot(2, 2, 3) + util.plot_timeseries( + ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 + ) + ax = fig.add_subplot(2, 2, 4) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data={ + mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] + for mode, lm in linear_momentum.items() + }, + ylabel=r'$|\vec{p}| / N$', + max_points=500, + ) -analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles'], sort_by='replicate_idx', select=is_lj_union + fig.suptitle( + f'$kT={job.cached_statepoint["kT"]}$, ' + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': md_sampling_jobs + mc_sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, + ), ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_union_analysis_complete')) -@Project.post(lambda *jobs: util.true_all(*jobs, key='lj_union_compare_modes_complete')) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def lj_union_compare_modes(*jobs): +def compare_modes(*jobs): """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_union_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [ 'nvt_langevin_md_cpu', @@ -1104,29 +1095,28 @@ def lj_union_compare_modes(*jobs): filename = f'lj_union_compare_kT{kT}_density{round(set_density, 2)}.svg' 
fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_union_compare_modes_complete'] = True - -@Project.pre.after(*md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_union_distribution_analyze_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), ) -def lj_union_distribution_analyze(*jobs): - """Checks that MD follows the correct KE distribution.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - import scipy + +def distribution_analyze(*jobs): + """Checks that MD follows the correct KE distribution.""" matplotlib.style.use('fivethirtyeight') - print('starting lj_union_distribution_analyze:', jobs[0]) + print(f'starting {__name__}.distribution_analyze:', jobs[0]) sim_modes = [ 'nvt_langevin_md_cpu', @@ -1183,7 +1173,6 @@ def lj_union_distribution_analyze(*jobs): n_rotate_dof = num_particles * 3 - print('Reading' + job.fn(sim_mode + '_quantities.h5')) log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) if 'md' in sim_mode: @@ -1312,8 +1301,21 @@ def lj_union_distribution_analyze(*jobs): ) fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_union_distribution_analyze_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.distribution_analyze', + Action( + method=distribution_analyze, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) ################################# @@ -1321,18 +1323,20 @@ def lj_union_distribution_analyze(*jobs): ################################# -def run_nve_md_sim(job, device, run_length, complete_filename): +def run_nve_md_sim(job, device, run_length): """Run the MD simulation in NVE.""" - import hoomd - sim_mode = 'nve_md' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') is_restarting = job.isfile(restart_filename) if is_restarting: initial_state = job.fn(restart_filename) else: - initial_state = job.fn('lj_union_initial_state_md.gsd') + initial_state = job.fn('initial_state_md.gsd') nve = hoomd.md.methods.ConstantVolume(hoomd.filter.Rigid(flags=('center',))) @@ -1356,7 +1360,7 @@ def run_nve_md_sim(job, device, run_length, complete_filename): ) if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -1367,32 +1371,10 @@ def run_nve_md_sim(job, device, run_length, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') -def is_lj_union_nve(job): - """Test if a given job should be run for NVE conservation.""" - return ( - job.cached_statepoint['subproject'] == 'lj_union' - and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS - ) - - -partition_jobs_cpu_mpi_nve = aggregator.groupsof( - 
num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_lj_union_nve, -) - -partition_jobs_gpu_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_lj_union_nve, -) - nve_md_sampling_jobs = [] nve_md_job_definitions = [ { 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_nve, 'run_length': 10_000_000, }, ] @@ -1402,45 +1384,26 @@ def is_lj_union_nve(job): [ { 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu_nve, 'run_length': 100_000_000, }, ] ) -def add_nve_md_job(device_name, ranks_per_partition, aggregator, run_length): +def add_nve_md_job(device_name, run_length): """Add a MD NVE conservation job to the workflow.""" sim_mode = 'nve_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_union_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_union_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def lj_union_nve_md_job(*jobs): + def nve_action(*jobs): """Run NVE MD.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_union_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -1453,48 +1416,40 @@ def lj_union_nve_md_job(*jobs): job, f'{sim_mode}_{device_name}.log' ), ) - run_nve_md_sim( - job, - device, - run_length=run_length, - complete_filename=f'{sim_mode}_{device_name}_complete', - ) + run_nve_md_sim(job, device, run_length=run_length) if communicator.rank == 0: - print(f'completed lj_union_{sim_mode}_{device_name} {job}') - - nve_md_sampling_jobs.append(lj_union_nve_md_job) + print(f'completed {action_name}: {job}') + + nve_md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=nve_action, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_nve_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in nve_md_job_definitions: add_nve_md_job(**definition) -nve_analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles'], - sort_by='replicate_idx', - select=is_lj_union_nve, -) - -@Project.pre.after(*nve_md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_union_conservation_analysis_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=nve_analysis_aggregator, -) -def lj_union_conservation_analyze(*jobs): +def conservation_analyze(*jobs): """Analyze the output of NVE simulations and inspect conservation.""" - import math - - import matplotlib - 
import matplotlib.figure - import matplotlib.style - matplotlib.style.use('fivethirtyeight') - print('starting lj_union_conservation_analyze:', jobs[0]) + print(f'starting {__name__}.conservation_analyze:', jobs[0]) sim_modes = ['nve_md_cpu'] if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): @@ -1573,5 +1528,18 @@ def plot(*, ax, data, quantity_name, legend=False): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_union_conservation_analysis_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.conservation_analyze', + Action( + method=conservation_analyze, + configuration={ + 'previous_actions': nve_md_sampling_jobs, + 'group': _group_compare | _include_nve, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py index 07c65eb5..0ff12e4e 100644 --- a/hoomd_validation/patchy_particle_pressure.py +++ b/hoomd_validation/patchy_particle_pressure.py @@ -3,14 +3,24 @@ """Test for consistency between NVT and NPT simulations of patchy particles.""" +import itertools import json import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -24,7 +34,9 @@ LOG_PERIOD = {'trajectory': 50_000, 'quantities': 500} NUM_CPU_RANKS = min(16, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -60,30 +72,33 @@ def job_statepoints(): ) -def is_patchy_particle_pressure(job): - """Test if a job is part of the patchy_particle_pressure subproject.""" - return job.cached_statepoint['subproject'] == 'patchy_particle_pressure' - - -def is_patchy_particle_pressure_positive_pressure(job): - """Test if a job is part of the patchy_particle_pressure subproject.""" - return ( - job.cached_statepoint['subproject'] == 'patchy_particle_pressure' - and job.cached_statepoint['pressure'] > 0 +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS ) - - -partition_jobs_cpu_mpi_nvt = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_patchy_particle_pressure, -) - -partition_jobs_cpu_mpi_npt = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_patchy_particle_pressure_positive_pressure, -) +} +_group_cpu_postive_pressure = _group_cpu | { + 'include': [{'all': [['/subproject', '==', __name__], ['/pressure', '>', 0]]}] +} + +_group_compare = _group | { + 'sort_by': [ + '/pressure', + 
'/density', + '/temperature', + '/chi', + '/num_particles', + '/long_range_interaction_scale_factor', + ], + 'split_by_sort_key': True, + 'submit_whole': True, +} def make_potential( @@ -108,8 +123,6 @@ def make_potential( The terminology (e.g., `ehat`) comes from the "Modelling Patchy Particles" HOOMD-blue tutorial. """ - import hoomd - r = [ (sigma + sq_well_lambda * sigma) / 2.0, sq_well_lambda * sigma, @@ -126,27 +139,16 @@ def make_potential( return angular_step -@Project.post.isfile('patchy_particle_pressure_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi_nvt, -) -def patchy_particle_pressure_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting patchy_particle_pressure_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -212,13 +214,28 @@ def patchy_particle_pressure_create_initial_state(*jobs): hoomd.write.GSD.write( state=sim.state, - filename=job.fn('patchy_particle_pressure_initial_state.gsd'), + filename=job.fn('initial_state.gsd'), mode='wb', logger=trajectory_logger, ) if communicator.rank == 0: - print(f'completed patchy_particle_pressure_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): @@ -238,10 +255,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. Each tuple is a pair of the instance and the loggable quantity name. 
""" - import hoomd - import numpy - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -322,17 +335,19 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - import hoomd - sim_mode = 'nvt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('patchy_particle_pressure_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) @@ -393,24 +408,26 @@ def run_nvt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice(f'Ending {job} run early due to walltime limits.') -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - # device sim_mode = 'npt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('patchy_particle_pressure_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates @@ -508,7 +525,7 @@ def run_npt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice(f'Ending {job} run early due to walltime limits.') @@ -519,44 +536,31 @@ def run_npt_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_nvt, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_npt, + 'resources': _resources_cpu, + 'group': _group_cpu_postive_pressure, }, ] -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) + action_name = f'{__name__}.{mode}_{device_name}' - @Project.pre.after(patchy_particle_pressure_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'patchy_particle_pressure_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if 
communicator.rank == 0: - print(f'starting patchy_particle_pressure_{mode}_{device_name}:', job) + print(f'starting {action_name}:', job) device = hoomd.device.CPU( communicator=communicator, @@ -565,155 +569,147 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: - print(f'completed patchy_particle_pressure_{mode}_{device_name} ' f'{job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_patchy_particle_pressure) -@Project.pre.after(*sampling_jobs) -@Project.post.true('patchy_particle_pressure_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def patchy_particle_pressure_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting patchy_particle_pressure_analyze:', job) - - sim_modes = [] - for _ensemble in ['nvt', 'npt']: - if job.isfile(f'{_ensemble}_cpu_quantities.h5'): - sim_modes.append(f'{_ensemble}_cpu') - - util._sort_sim_modes(sim_modes) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - timesteps = {} - pressures = {} - densities = {} + sim_modes = [] + for _ensemble in ['nvt', 'npt']: + if job.isfile(f'{_ensemble}_cpu_quantities.h5'): + sim_modes.append(f'{_ensemble}_cpu') - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + util._sort_sim_modes(sim_modes) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + timesteps = {} + pressures = {} + densities = {} - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), - ) + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - # Plot results - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 2, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax_distribution = fig.add_subplot(2, 2, 2, sharey=ax) - util.plot_distribution( - ax_distribution, - {k: v for k, v in densities.items() if not k.startswith('nvt')}, - r'', - expected=job.cached_statepoint['density'], - bins=50, - plot_rotated=True, - ) - - ax = fig.add_subplot(2, 2, 3) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, 
- data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - ax_distribution = fig.add_subplot(2, 2, 4, sharey=ax) - util.plot_distribution( - ax_distribution, - pressures, - r'', - expected=job.cached_statepoint['pressure'], - bins=50, - plot_rotated=True, - ) + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' + ] - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'T={job.cached_statepoint["temperature"]}, ' - f'$\\chi={job.cached_statepoint["chi"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}, ' - '$\\varepsilon_{\\mathrm{rep}}/\\varepsilon_{\\mathrm{att}}$' - f'$={job.cached_statepoint["long_range_interaction_scale_factor"]}$' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight', transparent=False) + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - job.document['patchy_particle_pressure_analysis_complete'] = True + # Plot results + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 2, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() + + ax_distribution = fig.add_subplot(2, 2, 2, sharey=ax) + util.plot_distribution( + ax_distribution, + {k: v for k, v in densities.items() if not k.startswith('nvt')}, + r'', + expected=job.cached_statepoint['density'], + bins=50, + plot_rotated=True, + ) + ax = fig.add_subplot(2, 2, 3) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) + ax_distribution = fig.add_subplot(2, 2, 4, sharey=ax) + util.plot_distribution( + ax_distribution, + pressures, + r'', + expected=job.cached_statepoint['pressure'], + bins=50, + plot_rotated=True, + ) -@Project.pre( - lambda *jobs: util.true_all(*jobs, key='patchy_particle_pressure_analysis_complete') -) -@Project.post( - lambda *jobs: util.true_all( - *jobs, key='patchy_particle_pressure_compare_modes_complete' - ) -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=[ - 'pressure', - 'density', - 'temperature', - 'chi', - 'num_particles', - 'long_range_interaction_scale_factor', - ], - sort_by='replicate_idx', - select=is_patchy_particle_pressure, + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'T={job.cached_statepoint["temperature"]}, ' + f'$\\chi={job.cached_statepoint["chi"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}, ' + '$\\varepsilon_{\\mathrm{rep}}/\\varepsilon_{\\mathrm{att}}$' + f'$={job.cached_statepoint["long_range_interaction_scale_factor"]}$' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight', transparent=False) + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def patchy_particle_pressure_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import 
matplotlib.figure - import matplotlib.style - import numpy + +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting patchy_particle_pressure_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [] for _ensemble in ['nvt', 'npt']: @@ -811,5 +807,18 @@ def patchy_particle_pressure_compare_modes(*jobs): transparent=False, ) - for job in jobs: - job.document['patchy_particle_pressure_compare_modes_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 4b0c72a1..80d32c58 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -6,27 +6,45 @@ # Define subproject flow operations import alj_2d import config -import flow import hard_disk import hard_sphere import lj_fluid import lj_union import patchy_particle_pressure +import signac import simple_polygon -from project_class import Project - -# use srun on delta (mpiexec fails on multiple nodes) -flow.environments.xsede.DeltaEnvironment.mpi_cmd = 'srun' - -__all__ = [ - 'alj_2d', - 'lj_fluid', - 'lj_union', - 'hard_disk', - 'hard_sphere', - 'simple_polygon', - 'patchy_particle_pressure', +from workflow_class import ValidationWorkflow + +all_subprojects = [ + alj_2d, + lj_fluid, + lj_union, + hard_disk, + hard_sphere, + simple_polygon, + patchy_particle_pressure, ] + +def init(args): + """Initialize the workspace.""" + # TODO: uncomment + # if (config.project_root / 'workspace').exists(): + # message = "The project already initialized." + # raise RuntimeError(message) + + project = signac.init_project(path=config.project_root) + + # initialize jobs for validation test projects + for subproject in all_subprojects: + # add all the jobs to the project + for job_sp in subproject.job_statepoints(): + project.open_job(job_sp).init() + + if __name__ == '__main__': - Project.get_project(config.project_root).main() + ValidationWorkflow.main( + entrypoint=config.project_root / 'hoomd_validation' / 'project.py', + init=init, + path=config.project_root, + ) diff --git a/hoomd_validation/project_class.py b/hoomd_validation/project_class.py deleted file mode 100644 index faae160f..00000000 --- a/hoomd_validation/project_class.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) 2022-2024 The Regents of the University of Michigan. -# Part of HOOMD-blue, released under the BSD 3-Clause License. 
- -"""FlowProject class for the validation test suite.""" - -from flow import FlowProject - - -class Project(FlowProject): - """Validation test projet.""" - - pass diff --git a/hoomd_validation/simple_polygon.py b/hoomd_validation/simple_polygon.py index 3ee45e46..8b715859 100644 --- a/hoomd_validation/simple_polygon.py +++ b/hoomd_validation/simple_polygon.py @@ -3,14 +3,24 @@ """Simple polygon equation of state validation test.""" +import itertools import json import os -import pathlib +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -34,7 +44,9 @@ LOG_PERIOD = {'trajectory': 50_000, 'quantities': 100} NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -58,45 +70,34 @@ def job_statepoints(): ) -def is_simple_polygon(job): - """Test if a given job is part of the simple_polygon subproject.""" - return job.cached_statepoint['subproject'] == 'simple_polygon' - - -partition_jobs_cpu_serial = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission']), - sort_by='density', - select=is_simple_polygon, -) - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_simple_polygon, -) +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_group_compare = _group | { + 'sort_by': ['/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} -@Project.post.isfile('simple_polygon_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi, -) -def simple_polygon_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting simple_polygon_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -145,13 +146,28 @@ def simple_polygon_create_initial_state(*jobs): hoomd.write.GSD.write( state=sim.state, - filename=job.fn('simple_polygon_initial_state.gsd'), + filename=job.fn('initial_state.gsd'), mode='wb', logger=trajectory_logger, ) if communicator.rank == 0: - print(f'completed simple_polygon_create_initial_state: {job}') + 
print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): @@ -171,10 +187,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. Each tuple is a pair of the instance and the loggable quantity name. """ - import hoomd - import numpy - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -238,17 +250,19 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - import hoomd - sim_mode = 'nvt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('simple_polygon_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) @@ -309,7 +323,7 @@ def run_nvt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -318,18 +332,19 @@ def run_nvt_sim(job, device, complete_filename): ) -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - - # device sim_mode = 'npt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('simple_polygon_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates @@ -427,7 +442,7 @@ def run_npt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -441,39 +456,26 @@ def run_npt_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, ] -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) + action_name = f'{__name__}.{mode}_{device_name}' 
- @Project.pre.after(simple_polygon_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'simple_polygon_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] @@ -487,121 +489,124 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: - print(f'completed simple_polygon_{mode}_{device_name} {job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_simple_polygon) -@Project.pre.after(*sampling_jobs) -@Project.post.true('simple_polygon_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def simple_polygon_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting simple_polygon_analyze:', job) - - sim_modes = [] - for _ensemble in ['nvt', 'npt']: - if job.isfile(f'{_ensemble}_cpu_quantities.h5'): - sim_modes.append(f'{_ensemble}_cpu') - - util._sort_sim_modes(sim_modes) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - timesteps = {} - pressures = {} - densities = {} + sim_modes = [] + for _ensemble in ['nvt', 'npt']: + if job.isfile(f'{_ensemble}_cpu_quantities.h5'): + sim_modes.append(f'{_ensemble}_cpu') - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + util._sort_sim_modes(sim_modes) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + timesteps = {} + pressures = {} + densities = {} - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), - ) + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - # Plot results - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 1, 2) - 
util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' + ] - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - job.document['simple_polygon_analysis_complete'] = True + # Plot results + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() + ax = fig.add_subplot(2, 1, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='simple_polygon_analysis_complete')) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='simple_polygon_compare_modes_complete') -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=['density', 'num_particles'], - sort_by='replicate_idx', - select=is_simple_polygon, + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def simple_polygon_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy + +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting simple_polygon_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [] for _ensemble in ['nvt', 'npt']: @@ -642,7 +647,7 @@ def simple_polygon_compare_modes(*jobs): avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - avg_quantity, stderr_quantity = util.plot_vs_expected( + util.plot_vs_expected( ax=ax, values=quantities, ylabel=labels[quantity_name], @@ -654,5 +659,18 @@ def simple_polygon_compare_modes(*jobs): filename = f'simple_polygon_compare_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['simple_polygon_compare_modes_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + 
), +) diff --git a/hoomd_validation/util.py b/hoomd_validation/util.py index 14fbfb04..cfa5349b 100644 --- a/hoomd_validation/util.py +++ b/hoomd_validation/util.py @@ -5,26 +5,24 @@ import os +import h5py +import numpy import signac - -def true_all(*jobs, key): - """Check that a given key is true in all jobs.""" - return all(job.document.get(key, False) for job in jobs) - - -def total_ranks_function(ranks_per_job): - """Make a function that computes the number of ranks for an aggregate.""" - return lambda *jobs: ranks_per_job * len(jobs) +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') def get_job_filename(sim_mode, device, name, file_type): """Construct a job filename.""" - import hoomd - - suffix = 'cpu' - if isinstance(device, hoomd.device.GPU): - suffix = 'gpu' + if isinstance(device, str): + suffix = device + else: + suffix = 'cpu' + if isinstance(device, hoomd.device.GPU): + suffix = 'gpu' return f'{sim_mode}_{suffix}_{name}.{file_type}' @@ -103,8 +101,6 @@ def make_simulation( trajectory_logger (`hoomd.logging.Logger`): Logger to add to trajectory writer. """ - import hoomd - sim = hoomd.Simulation(device) sim.seed = make_seed(job, sim_mode) sim.create_state_from_gsd(initial_state) @@ -123,7 +119,7 @@ def make_simulation( # write particle trajectory to a gsd file trajectory_writer = hoomd.write.GSD( - filename=job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd')), + filename=job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd.tmp')), trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(trajectory_write_period), @@ -139,7 +135,7 @@ def make_simulation( logger.add(sim, quantities=['timestep']) quantity_writer = hoomd.write.HDF5Log( - filename=job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5')), + filename=job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5.tmp')), trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(log_write_period), @@ -154,6 +150,56 @@ def make_simulation( return sim +def is_simulation_complete( + job, + device, + sim_mode, +): + """Check if a simulation is complete. + + Check if all output files are present. + + Args: + job (`signac.Job`): signac job object. + + device (`hoomd.device.Device`): hoomd device object. + + sim_mode (str): String defining the simulation mode. + """ + gsd_exists = job.isfile(get_job_filename(sim_mode, device, 'trajectory', 'gsd')) + h5_exists = job.isfile(get_job_filename(sim_mode, device, 'quantities', 'h5')) + + return gsd_exists and h5_exists + + +def mark_simulation_complete( + job, + device, + sim_mode, +): + """Mark that simulation is complete. + + Moves .tmp files to the final filename. + + Args: + job (`signac.Job`): signac job object. + + device (`hoomd.device.Device`): hoomd device object. + + sim_mode (str): String defining the simulation mode. + """ + if device.communicator.rank == 0: + os.rename( + job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd.tmp')), + job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd')), + ) + + os.rename( + job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5.tmp')), + job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5')), + ) + + def make_seed(job, sim_mode=None): """Make a random number seed from a job. 
@@ -171,8 +217,6 @@ def plot_distribution(
     ax, data, independent_variable_label, expected=None, bins=100, plot_rotated=False
 ):
     """Plot distributions."""
-    import numpy
-
     max_density_histogram = 0
 
     sim_modes = data.keys()
@@ -241,8 +285,6 @@ def plot_vs_expected(
     ax, values, ylabel, expected=0, relative_scale=None, separate_nvt_npt=False
 ):
     """Plot values vs an expected value."""
-    import numpy
-
     sim_modes = values.keys()
 
     avg_value = {}
@@ -309,8 +351,6 @@ def plot_vs_expected(
 
 def plot_timeseries(ax, timesteps, data, ylabel, expected=None, max_points=None):
     """Plot data as a time series."""
-    import numpy
-
     provided_modes = list(data.keys())
 
     for mode in provided_modes:
@@ -344,9 +384,6 @@ def _sort_sim_modes(sim_modes):
 
 def read_log(filename):
     """Read a HDF5 log as a dictionary of logged quantities."""
-    import h5py
-    import numpy
-
     with h5py.File(mode='r', name=filename) as f:
         keys = []
         f.visit(lambda name: keys.append(name))
diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py
new file mode 100644
index 00000000..83605d8d
--- /dev/null
+++ b/hoomd_validation/workflow.py
@@ -0,0 +1,172 @@
+# Copyright (c) 2022-2024 The Regents of the University of Michigan.
+# Part of HOOMD-blue, released under the BSD 3-Clause License.
+
+"""Manage row actions from Python.
+
+* Subclass `Workflow` to create a new workflow.
+* Call ``YourWorkflow.add_action`` to add a new action to the workflow.
+* Call ``YourWorkflow.write_workflow`` to write ``workflow.toml`` with the configuration
+  of all actions.
+* Call ``YourWorkflow.main()`` in ``project.py`` to parse command line arguments and
+  dispatch the correct action method.
+"""
+
+import argparse
+import subprocess
+from pathlib import Path
+
+import rtoml
+import signac
+
+
+def _get_cluster_name():
+    """Get the current cluster name."""
+    result = subprocess.run(
+        ['row', 'show', 'cluster', '--short'],
+        capture_output=True,
+        check=True,
+        text=True,
+    )
+    return result.stdout.strip()
+
+
+class Action:
+    """Represent a row action.
+
+    An `Action` consists of a method that implements the action and the row
+    configuration options for that action. The method is called by `__call__`. The
+    configuration is stored as a raw dictionary that maps directly to the ``[action]``
+    element of the row ``workflow.toml``.
+
+    The method must be a function that takes the argument(s) ``*jobs``.
+
+    Args:
+        method(callable): The method that implements this action. It must take the
+            argument ``*jobs``.
+        configuration(dict): Configuration options for the action to be written to
+            ``workflow.toml``.
+    """
+
+    def __init__(self, method, configuration):
+        if 'name' in configuration:
+            message = 'configuration must not contain "name"'
+            raise ValueError(message)
+
+        self._method = method
+        self._configuration = configuration
+
+    def __call__(self, *jobs):
+        """Call the `method` given on construction."""
+        self._method(*jobs)
+
+
+class Workflow:
+    """Represent a single workflow."""
+
+    _actions = {}
+
+    @classmethod
+    def add_action(cls, name, action):
+        """Add an action.
+
+        Args:
+            name(str): The action's name. Must be unique.
+            action(Action): The action itself.
+        """
+        if name in cls._actions:
+            message = f'Action {name} cannot be added twice.'
+            raise ValueError(message)
+
+        cls._actions[name] = action
+
+    @classmethod
+    def write_workflow(cls, entrypoint, path=None, default=None, account=None):
+        """Write the file ``workflow.toml``.
+ + ``workflow.toml`` will include the signac workspace definition, the given + ``default`` mapping (when provided), and configurations for all added actions. + + Note: + ``default.action.command`` will be automatically set based on the value of + `entrypoint`. + + Args: + entrypoint(str): Name of the python file that calls the `main` entrypoint. + path(Path): Path to write ``workflow.toml``. + default(dict): The ``[default]`` mapping. + account(str): Name of the cluster account to use. + """ + workflow = { + 'workspace': {'path': 'workspace', 'value_file': 'signac_statepoint.json'} + } + + workflow['default'] = { + 'action': { + 'command': f'python -u {entrypoint} action $ACTION_NAME {{directories}}' + } + } + if account is not None: + workflow['default']['action'].update( + {'submit_options': {_get_cluster_name(): {'account': account}}} + ) + + if default is not None: + workflow['default'].update(default) + + workflow['action'] = [] + for name, action_item in cls._actions.items(): + action = {'name': name} + action.update(action_item._configuration) + workflow['action'].append(action) + + if path is None: + path = Path('.') + + with open(path / 'workflow.toml', 'w', encoding='utf-8') as workflow_file: + rtoml.dump(workflow, workflow_file, pretty=True) + + @classmethod + def main(cls, init=None, init_args=None, **kwargs): + """Implement the main entrypoint for ``project.py``. + + Valid commands are: + * ``python project.py init`` + * ``python project.py action action_name directories`` + + ``init`` will call the user-provided method ``init``, then generate the file + ``workflow.toml``. When provided, items in the ``init_args`` list will be added + as options to the ``init`` subparser with ``add_argument``. + + Args: + init(callable): User-provided initialization routine. Must take one + argument: ``args`` - the ``argparse`` parsed arguments. + init_args(list[str]): List of args to add to the ``init`` subparser. + **kwargs: Forwarded to `write_workflow`. + """ + parser = argparse.ArgumentParser() + command = parser.add_subparsers(dest='command', required=True) + init_parser = command.add_parser('init') + init_parser.add_argument('--account') + if init_args is not None: + for arg in init_args: + init_parser.add_argument(arg) + + action_parser = command.add_parser('action') + action_parser.add_argument('action') + action_parser.add_argument('directories', nargs='+') + + args = parser.parse_args() + + if args.command == 'init': + if init is not None: + init(args) + + cls.write_workflow(account=args.account, **kwargs) + elif args.command == 'action': + project = signac.get_project() + jobs = [project.open_job(id=directory) for directory in args.directories] + cls._actions[args.action](*jobs) + + else: + message = f'Invalid command: {args.command}' + raise RuntimeError(message) diff --git a/hoomd_validation/workflow_class.py b/hoomd_validation/workflow_class.py new file mode 100644 index 00000000..756de9d0 --- /dev/null +++ b/hoomd_validation/workflow_class.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2024 The Regents of the University of Michigan. +# Part of HOOMD-blue, released under the BSD 3-Clause License.
+ +"""Workflow class for the validation test suite.""" + +from workflow import Workflow + + +class ValidationWorkflow(Workflow): + """Validation test workflow.""" + + pass diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 52444d86..00000000 --- a/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -signac >= 2.2.0 -signac-flow >= 0.25.1 -signac-dashboard -matplotlib -gsd -numpy -scipy