Skip to content

Commit

Permalink
Port alj_2d to row.
Browse files Browse the repository at this point in the history
  • Loading branch information
joaander committed Aug 19, 2024
1 parent 0890261 commit 1a66809
Show file tree
Hide file tree
Showing 4 changed files with 112 additions and 109 deletions.
211 changes: 107 additions & 104 deletions hoomd_validation/alj_2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,18 +3,24 @@

"""ALJ 2D energy conservation validation test."""

import itertools
import math
import os
import pathlib

import hoomd
import matplotlib
import matplotlib.figure
import matplotlib.style
import numpy
import util
from config import CONFIG
from flow import aggregator
from project_class import Project
from workflow import Action
from workflow_class import ValidationWorkflow

# Run parameters shared between simulations.
# Step counts must be even and a multiple of the log quantity period.
RANDOMIZE_STEPS = 20_000
RUN_STEPS = 50_000_000
# NOTE(review): the assignment below supersedes the one above and is 100x
# shorter (500,000 vs 50,000,000 steps) — confirm the reduction is intended.
# Rewritten with conventional digit grouping: 50_000_0 == 500_000.
RUN_STEPS = 500_000
# Write a restart file every 1% of the production run.
RESTART_STEPS = RUN_STEPS // 100
# Total step count a completed simulation must reach (randomization + run).
TOTAL_STEPS = RANDOMIZE_STEPS + RUN_STEPS

Expand All @@ -36,7 +42,9 @@
NUM_REPLICATES = min(4, CONFIG['replicates'])
NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim'])

# Seconds of walltime after which running simulations should stop themselves,
# presumably to leave time for writing restart files — TODO confirm.
# The first (old, CONFIG-based) assignment is immediately superseded by the
# environment-variable-based one below.
WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60
# NOTE(review): with the fallback default of 10 minutes this evaluates to
# 0 seconds — confirm ACTION_WALLTIME_IN_MINUTES is always set by the runner.
WALLTIME_STOP_SECONDS = (
    int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10
) * 60


def job_statepoints():
Expand All @@ -57,45 +65,37 @@ def job_statepoints():
)


def is_alj_2d(job):
    """Test if a given job is part of the alj_2d subproject."""
    # Selector used by the (old-style) aggregators below to restrict them to
    # this subproject's signac jobs.
    return job.cached_statepoint['subproject'] == 'alj_2d'


# Old flow-style aggregator: batch replicate jobs so that one submission fills
# CONFIG['max_cores_submission'] cores at NUM_CPU_RANKS ranks per job.
partition_jobs_cpu = aggregator.groupsof(
    num=min(NUM_REPLICATES, CONFIG['max_cores_submission'] // NUM_CPU_RANKS),
    sort_by='density',
    select=is_alj_2d,
)
# Base row-style group: select this module's jobs, ordered by density.
_group = {
    'sort_by': ['/density'],
    'include': [{'condition': ['/subproject', '==', __name__]}],
}
# Base resources shared by all actions; CPU/GPU variants extend it below.
_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}}
_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}}
# Cap CPU group size so one submission fits in the core budget.
_group_cpu = _group | {
    'maximum_size': min(
        CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS
    )
}
# GPU actions run one rank per directory with one GPU attached.
_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1}
_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']}

# Old flow-style aggregator for GPU runs (superseded by _group_gpu above).
partition_jobs_gpu = aggregator.groupsof(
    num=min(NUM_REPLICATES, CONFIG['max_gpus_submission']),
    sort_by='density',
    select=is_alj_2d,
)
# Group for cross-mode comparison/analysis actions: one submission per
# (kT, density, num_particles) state point, covering all replicates at once.
_group_compare = _group | {
    'sort_by': ['/kT', '/density', '/num_particles'],
    'split_by_sort_key': True,
    'submit_whole': True,
}


@Project.post.isfile('alj_2d_initial_state.gsd')
@Project.operation(
directives=dict(
executable=CONFIG['executable'],
nranks=util.total_ranks_function(NUM_CPU_RANKS),
walltime=CONFIG['short_walltime'],
),
aggregator=partition_jobs_cpu,
)
def alj_2d_create_initial_state(*jobs):
def create_initial_state(*jobs):
"""Create initial system configuration."""
import itertools

import hoomd
import numpy

communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS)
job = jobs[communicator.partition]

if job.isfile('initial_state.gsd'):
return

if communicator.rank == 0:
print('starting alj2_create_initial_state:', job)
print(f'starting {__name__}.create_initial_state:', job)

init_diameter = CIRCUMCIRCLE_RADIUS * 2 * 1.15

Expand Down Expand Up @@ -143,11 +143,26 @@ def alj_2d_create_initial_state(*jobs):
device.notice('Done.')

hoomd.write.GSD.write(
state=sim.state, filename=job.fn('alj_2d_initial_state.gsd'), mode='wb'
state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb'
)

if communicator.rank == 0:
print(f'completed alj_2d_create_initial_state: {job}')
print(f'completed {__name__}.create_initial_state: {job}')


# Register the initial-state generation as a row workflow action: MPI-launched
# on CPU ranks, producing 'initial_state.gsd' in each job directory.
ValidationWorkflow.add_action(
    f'{__name__}.create_initial_state',
    Action(
        method=create_initial_state,
        configuration={
            'products': ['initial_state.gsd'],
            'launchers': ['mpi'],
            'group': _group_cpu,
            # Override the base walltime: state generation is short.
            'resources': _resources_cpu
            | {'walltime': {'per_submission': CONFIG['short_walltime']}},
        },
    ),
)


def make_md_simulation(
Expand All @@ -172,18 +187,15 @@ def make_md_simulation(
period_multiplier (int): Factor to multiply the GSD file periods by.
"""
import hoomd
from hoomd import md

incircle_d = INCIRCLE_RADIUS * 2
circumcircle_d = CIRCUMCIRCLE_RADIUS * 2
r_cut = max(
2 ** (1 / 6) * incircle_d, circumcircle_d + 2 ** (1 / 6) * 0.15 * incircle_d
)

# pair force
nlist = md.nlist.Cell(buffer=0.4)
alj = md.pair.aniso.ALJ(default_r_cut=r_cut, nlist=nlist)
nlist = hoomd.md.nlist.Cell(buffer=0.4)
alj = hoomd.md.pair.aniso.ALJ(default_r_cut=r_cut, nlist=nlist)
alj.shape['A'] = {'vertices': PARTICLE_VERTICES, 'faces': [], 'rounding_radii': 0}
alj.params[('A', 'A')] = {
'epsilon': ALJ_PARAMS['epsilon'],
Expand All @@ -193,12 +205,12 @@ def make_md_simulation(
}

# integrator
integrator = md.Integrator(
integrator = hoomd.md.Integrator(
dt=0.0001, methods=[method], forces=[alj], integrate_rotational_dof=True
)

# compute thermo
thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All())
thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All())

# add gsd log quantities
logger = hoomd.logging.Logger(categories=['scalar', 'sequence'])
Expand Down Expand Up @@ -238,16 +250,18 @@ def make_md_simulation(
return sim


def run_nve_md_sim(job, device, complete_filename):
def run_nve_md_sim(job, device):
"""Run the MD simulation in NVE."""
import hoomd

sim_mode = 'nve_md'

if util.is_simulation_complete(job, device, sim_mode):
return

restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd')
if job.isfile(restart_filename):
initial_state = job.fn(restart_filename)
else:
initial_state = job.fn('alj_2d_initial_state.gsd')
initial_state = job.fn('initial_state.gsd')

nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All())

Expand All @@ -268,7 +282,7 @@ def run_nve_md_sim(job, device, complete_filename):
hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb')

if sim.timestep == TOTAL_STEPS:
pathlib.Path(job.fn(complete_filename)).touch()
util.mark_simulation_complete(job, device, sim_mode)
device.notice('Done.')
else:
device.notice(
Expand All @@ -281,8 +295,6 @@ def run_nve_md_sim(job, device, complete_filename):
# Per-device parameter sets expanded into NVE MD actions below.
# NOTE(review): 'ranks_per_partition' and 'aggregator' are leftovers from the
# old flow-based workflow (deleted lines in this diff) — the row port derives
# both from the environment and _group/_resources dicts instead.
nve_md_job_definitions = [
    {
        'device_name': 'cpu',
        'ranks_per_partition': NUM_CPU_RANKS,
        'aggregator': partition_jobs_cpu,
    },
]

Expand All @@ -291,44 +303,25 @@ def run_nve_md_sim(job, device, complete_filename):
[
{
'device_name': 'gpu',
'ranks_per_partition': 1,
'aggregator': partition_jobs_gpu,
},
]
)


def add_nve_md_job(device_name, ranks_per_partition, aggregator):
def add_nve_md_job(device_name):
"""Add a MD NVE conservation job to the workflow."""
sim_mode = 'nve_md'
action_name = f'{__name__}.{sim_mode}_{device_name}'

directives = dict(
walltime=CONFIG['max_walltime'],
executable=CONFIG['executable'],
nranks=util.total_ranks_function(ranks_per_partition),
)

if device_name == 'gpu':
directives['ngpu'] = util.total_ranks_function(ranks_per_partition)

@Project.pre.after(alj_2d_create_initial_state)
@Project.post.isfile(f'{sim_mode}_{device_name}_complete')
@Project.operation(
name=f'alj_2d_{sim_mode}_{device_name}',
directives=directives,
aggregator=aggregator,
)
def alj_2d_nve_md_job(*jobs):
def nve_action(*jobs):
"""Run NVE MD."""
import hoomd

communicator = hoomd.communicator.Communicator(
ranks_per_partition=ranks_per_partition
ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY'])
)
job = jobs[communicator.partition]

if communicator.rank == 0:
print(f'starting alj_2d_{sim_mode}_{device_name}:', job)
print(f'starting {action_name}:', job)

if device_name == 'gpu':
device_cls = hoomd.device.GPU
Expand All @@ -341,43 +334,40 @@ def alj_2d_nve_md_job(*jobs):
job, f'{sim_mode}_{device_name}.log'
),
)
run_nve_md_sim(
job, device, complete_filename=f'{sim_mode}_{device_name}_complete'
)
run_nve_md_sim(job, device)

if communicator.rank == 0:
print(f'completed alj_2d_{sim_mode}_{device_name}: {job}')

nve_md_sampling_jobs.append(alj_2d_nve_md_job)
print(f'completed {action_name}: {job}')

nve_md_sampling_jobs.append(action_name)

ValidationWorkflow.add_action(
action_name,
Action(
method=nve_action,
configuration={
'products': [
util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'),
util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'),
],
'launchers': ['mpi'],
'group': globals().get(f'_group_{device_name}'),
'resources': globals().get(f'_resources_{device_name}'),
'previous_actions': [f'{__name__}.create_initial_state'],
},
),
)


for definition in nve_md_job_definitions:
add_nve_md_job(**definition)

analysis_aggregator = aggregator.groupby(
key=['kT', 'density', 'num_particles'], sort_by='replicate_idx', select=is_alj_2d
)


@Project.pre.after(*nve_md_sampling_jobs)
@Project.post(
lambda *jobs: util.true_all(*jobs, key='alj_2d_conservation_analysis_complete')
)
@Project.operation(
directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']),
aggregator=analysis_aggregator,
)
def alj_2d_conservation_analyze(*jobs):
def conservation_analyze(*jobs):
"""Analyze the output of NVE simulations and inspect conservation."""
import math

import matplotlib
import matplotlib.figure
import matplotlib.style

matplotlib.style.use('fivethirtyeight')

print('starting alj_2d_conservation_analyze:', jobs[0])
print(f'starting {__name__}.conservation_analyze:', jobs[0])

sim_modes = ['nve_md_cpu']
if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')):
Expand Down Expand Up @@ -456,5 +446,18 @@ def plot(*, ax, data, quantity_name, legend=False):

fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight')

for job in jobs:
job.document['alj_2d_conservation_analysis_complete'] = True

# Register the conservation analysis: a single-process action that runs after
# every NVE MD sampling action, grouped per state point for comparison plots.
ValidationWorkflow.add_action(
    f'{__name__}.conservation_analyze',
    Action(
        method=conservation_analyze,
        configuration={
            'previous_actions': nve_md_sampling_jobs,
            'group': _group_compare,
            'resources': {
                'processes': {'per_submission': 1},
                # Analysis is cheap: 2 minutes per job directory.
                'walltime': {'per_directory': '00:02:00'},
            },
        },
    ),
)
2 changes: 1 addition & 1 deletion hoomd_validation/lj_fluid.py
Original file line number Diff line number Diff line change
Expand Up @@ -1357,7 +1357,7 @@ def nve_action(*jobs):
)

if communicator.rank == 0:
print(f'completed {action_name} {job}')
print(f'completed {action_name}: {job}')

nve_md_sampling_jobs.append(action_name)

Expand Down
4 changes: 2 additions & 2 deletions hoomd_validation/lj_union.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
# Step counts must be even and a multiple of the log quantity period.
RANDOMIZE_STEPS = 20_000
EQUILIBRATE_STEPS = 100_000
# The second assignment (this commit's change) supersedes the first,
# lengthening the lj_union production run 100x to 50,000,000 steps.
RUN_STEPS = 500_000
RUN_STEPS = 50_000_000
# Write a restart file every 10% of the production run.
RESTART_STEPS = RUN_STEPS // 10
TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS

Expand Down Expand Up @@ -1422,7 +1422,7 @@ def nve_action(*jobs):
)

if communicator.rank == 0:
print(f'completed {action_name} {job}')
print(f'completed {action_name}: {job}')

nve_md_sampling_jobs.append(action_name)

Expand Down
4 changes: 2 additions & 2 deletions hoomd_validation/project.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"""Project workflow entry point."""

# Define subproject flow operations
# import alj_2d
import alj_2d
import config

# import hard_disk
Expand All @@ -18,7 +18,7 @@
from workflow_class import ValidationWorkflow

all_subprojects = [
# 'alj_2d',
alj_2d,
lj_fluid,
lj_union,
# 'hard_disk',
Expand Down

0 comments on commit 1a66809

Please sign in to comment.