Commit

Merge branch 'feature/lammps' of github.com:materialsproject/mpmorph into feature/lammps
mcgalcode committed Feb 22, 2023
2 parents 268601a + f68009e commit 0fc5ce9
Showing 19 changed files with 81 additions and 81 deletions.
2 changes: 1 addition & 1 deletion src/mpmorph/analysis/diffusion.py
@@ -290,7 +290,7 @@ def plot(self, title=None, annotate=True, el="", **kwargs):
self.y,
yerr=self.yerr.T,
label="Q[{}]: ".format(el) + tx + " K",
**kwargs
**kwargs,
)
plt.ylabel("ln(D cm$^2$/s)", fontsize=15)
plt.xlabel("1000/T K$^{-1}$", fontsize=15)
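
The axis labels in the hunk above ("ln(D cm$^2$/s)" vs "1000/T K$^{-1}$") imply an Arrhenius-style analysis of diffusivity. A minimal standalone sketch of that fit, using synthetic diffusivities rather than mpmorph's own objects:

import numpy as np
from scipy.stats import linregress

temps = np.array([1200.0, 1400.0, 1600.0, 1800.0])         # K
diffusivities = 1e-4 * np.exp(-3000.0 / temps)             # cm^2/s, synthetic Arrhenius data
slope, intercept, *_ = linregress(1000.0 / temps, np.log(diffusivities))
print(f"Ea/k ~ {-slope * 1000.0:.0f} K")                   # slope of ln(D) vs 1000/T is -(Ea/k)/1000
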
47 changes: 26 additions & 21 deletions src/mpmorph/analysis/melting_points.py
@@ -7,29 +7,29 @@
import matplotlib.pyplot as plt
import math

class MeltingPointClusterAnalyzer():

class MeltingPointClusterAnalyzer:
def _get_clusters(self, points):
clustering = AgglomerativeClustering(n_clusters=2).fit(points)
cluster1 = points[np.argwhere(clustering.labels_ == 1).squeeze()].T
cluster2 = points[np.argwhere(clustering.labels_ == 0).squeeze()].T
return cluster1, cluster2

def plot_vol_vs_temp(self, ts, vs, plot_title = None):
def plot_vol_vs_temp(self, ts, vs, plot_title=None):
points = np.array(list(zip(ts, vs)))
cluster1, cluster2 = self._get_clusters(points)
plt.scatter(*cluster1)
plt.scatter(*cluster2)
plt.xlabel("Temperature (K)")
plt.ylabel("Volume (A^3)")
Tm = self.estimate_melting_temp(ts, vs)
plt.plot([Tm, Tm], [min(vs), max(vs)], color='r')
plt.plot([Tm, Tm], [min(vs), max(vs)], color="r")

if plot_title is None:
plt.title("Volume vs Temperature by Clustering")
else:
plt.title(plot_title)

def estimate_melting_temp(self, temps, vols):
points = np.array(list(zip(temps, vols)))
cluster1, cluster2 = self._get_clusters(points)
@@ -42,8 +42,8 @@ def estimate_melting_temp(self, temps, vols):

return np.mean([max(solid_range), min(liquid_range)])

class MeltingPointSlopeAnalyzer():

class MeltingPointSlopeAnalyzer:
def split_dset(self, pts, split_idx):
return pts[0:split_idx], pts[split_idx:]

@@ -56,7 +56,7 @@ def assess_splits(self, xs, ys):
for idx in pt_idxs:
_, _, _, _, total_err = self.get_split_fit(xs, ys, idx)
errs.append(total_err)

return list(zip(pt_idxs, errs))

def get_linear_ys(self, m, b, xs):
@@ -79,56 +79,61 @@ def plot_split(self, xs, ys, split_idx):

plt.scatter(rightxs, rightys)
plt.plot(rightxs, right_fit_ys)

def get_best_split(self, xs, ys):
split_errs = self.assess_splits(xs, ys)
errs = [pt[1] for pt in split_errs]
idxs = [pt[0] for pt in split_errs]
best_split_idx = idxs[np.argmin(errs)]
return best_split_idx

def plot_vol_vs_temp(self, temps, vols):
split_idx = self.get_best_split(temps, vols)
self.plot_split(temps, vols, split_idx)
Tm = self.estimate_melting_temp(temps, vols)
print(Tm)
plt.plot([Tm, Tm], [min(vols), max(vols)], color='r')

plt.plot([Tm, Tm], [min(vols), max(vols)], color="r")

def estimate_melting_temp(self, temps, vols):
best_split_idx = self.get_best_split(temps, vols)
return np.mean([temps[best_split_idx], temps[best_split_idx - 1]])

class MeltingPointSlopeRMSEAnalyzer(MeltingPointSlopeAnalyzer):

class MeltingPointSlopeRMSEAnalyzer(MeltingPointSlopeAnalyzer):
def get_split_fit(self, xs, ys, split_idx):
leftx, rightx = self.split_dset(xs, split_idx)
lefty, righty = self.split_dset(ys, split_idx)

lslope, lintercept, r_value, p_value, std_err = linregress(leftx, lefty)
left_y_pred = lintercept + lslope * np.array(leftx)
lefterr = mean_squared_error(y_true=lefty, y_pred=left_y_pred, squared=False)

rslope, rintercept, r_value, p_value, std_err = linregress(rightx, righty)
right_y_pred = rintercept + rslope * np.array(rightx)
righterr = mean_squared_error(y_true=righty, y_pred=right_y_pred, squared=False)
combined_err = math.sqrt(lefterr ** 2 + righterr ** 2)

combined_err = math.sqrt(lefterr**2 + righterr**2)
combined_err = lefterr + righterr
return lslope, lintercept, rslope, rintercept, combined_err

class MeltingPointSlopeStdErrAnalyzer(MeltingPointSlopeAnalyzer):

class MeltingPointSlopeStdErrAnalyzer(MeltingPointSlopeAnalyzer):
def get_split_fit(self, xs, ys, split_idx):
leftx, rightx = self.split_dset(xs, split_idx)
lefty, righty = self.split_dset(ys, split_idx)

leftfit = linregress(leftx, lefty)
lefterr = leftfit.stderr

rightfit = linregress(rightx, righty)
righterr = rightfit.stderr
combined_err = math.sqrt(lefterr ** 2 + righterr ** 2)

combined_err = math.sqrt(lefterr**2 + righterr**2)
combined_err = lefterr + righterr
return leftfit.slope, leftfit.intercept, rightfit.slope, rightfit.intercept, combined_err
return (
leftfit.slope,
leftfit.intercept,
rightfit.slope,
rightfit.intercept,
combined_err,
)
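
For orientation, a hedged usage sketch of MeltingPointClusterAnalyzer based only on the methods visible in this diff; the synthetic volume-temperature data with a jump near 1300 K stands in for a real MD sweep:

import numpy as np
from mpmorph.analysis.melting_points import MeltingPointClusterAnalyzer

temps = np.arange(300, 2100, 100)                                           # K
vols = np.where(temps < 1300, 100 + 0.010 * temps, 112 + 0.030 * temps)     # A^3, synthetic jump at the transition

analyzer = MeltingPointClusterAnalyzer()
tm = analyzer.estimate_melting_temp(temps, vols)
analyzer.plot_vol_vs_temp(temps, vols, plot_title="Synthetic V(T) sweep")
print(f"Estimated melting temperature: {tm:.0f} K")
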
3 changes: 0 additions & 3 deletions src/mpmorph/analysis/structural_analysis.py
@@ -47,7 +47,6 @@ def polyhedra_connectivity(structures, pair, cutoff, step_freq=1):
polyhedra_list.append(set(current_poly))

for polypair in itertools.combinations(polyhedra_list, 2):

polyhedra_pair_type = (len(polypair[0]), len(polypair[1]))

shared_vertices = len(polypair[0].intersection(polypair[1]))
@@ -143,7 +142,6 @@ class BondAngleDistribution(object):
"""

def __init__(self, structures, cutoffs, step_freq=1):

self.bond_angle_distribution = None
self.structures = structures
self.step_freq = step_freq
@@ -251,7 +249,6 @@ def get_bond_angle_distribution(self):

# get all pair combinations of neighbor sites of i:
for p in itertools.combinations(neighbors[i], 2):

# check if pairs are within the defined cutoffs
if self._cutoff_type == "dict":
if self._check_skip_triplet(s_index, i, p[0][2], p[1][2]):
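
The hunk above enumerates unordered pairs of neighbor sites around a central atom, each pair defining one bond angle. A tiny self-contained sketch of that pattern with hypothetical neighbor labels:

import itertools

neighbors_of_i = ["O1", "O2", "O3"]               # hypothetical neighbors of central site i
for pair in itertools.combinations(neighbors_of_i, 2):
    print(pair)                                   # ('O1', 'O2'), ('O1', 'O3'), ('O2', 'O3')
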
3 changes: 1 addition & 2 deletions src/mpmorph/database.py
@@ -30,7 +30,7 @@ def __init__(
collection="tasks",
user=None,
password=None,
**kwargs
**kwargs,
):
super(VaspMDCalcDb, self).__init__(
host, port, database, collection, user, password, **kwargs
@@ -75,7 +75,6 @@ def insert_task(

# insert structures at each ionic step into GridFS
if parse_ionic_steps and "calcs_reversed" in task_doc:

# Convert from ionic steps dictionary to pymatgen.core.trajectory.Trajectory object
ionic_steps_dict = task_doc["calcs_reversed"][0]["output"]["ionic_steps"]
time_step = task_doc["input"]["incar"]["POTIM"]
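
The comment in this hunk describes converting the ionic-steps dictionaries into a pymatgen Trajectory before storing structures in GridFS. A hedged sketch of one way to do that with the public pymatgen API; the actual conversion inside insert_task may differ, and ionic_steps_dict plus its "structure" key are assumptions taken from the surrounding lines:

from pymatgen.core.structure import Structure
from pymatgen.core.trajectory import Trajectory

# ionic_steps_dict: list of ionic-step dicts pulled from the task document (see the hunk above)
structures = [Structure.from_dict(step["structure"]) for step in ionic_steps_dict]
trajectory = Trajectory.from_structures(structures, constant_lattice=True)
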
1 change: 0 additions & 1 deletion src/mpmorph/firetasks/dbtasks.py
@@ -222,7 +222,6 @@ def load_trajectories_from_gfs(runs, mmdb, gfs_keys=None):

trajectory = None
for i, (fs_id, fs) in enumerate(gfs_keys):

if fs == "trajectories_fs" or fs == "rebuild_trajectories_fs":
# Load stored Trajectory
print(fs_id, "is stored in trajectories_fs")
6 changes: 3 additions & 3 deletions src/mpmorph/fireworks/powerups.py
@@ -46,7 +46,7 @@ def aggregate_trajectory(fw, **kwargs):
def add_cont_structure(fw):
prev_struct_task = PreviousStructureTask()
insert_i = 2
for (i, task) in enumerate(fw.tasks):
for i, task in enumerate(fw.tasks):
if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
insert_i = i
break
@@ -68,7 +68,7 @@ def add_pass_pv(fw, **kwargs):

def add_pv_volume_rescale(fw):
insert_i = 2
for (i, task) in enumerate(fw.tasks):
for i, task in enumerate(fw.tasks):
if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
insert_i = i
break
@@ -80,7 +80,7 @@ def add_rescale_volume(fw, **kwargs):
def add_rescale_volume(fw, **kwargs):
rsv_task = RescaleVolumeTask(**kwargs)
insert_i = 2
for (i, task) in enumerate(fw.tasks):
for i, task in enumerate(fw.tasks):
if task.fw_name == "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}":
insert_i = i
break
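
Each powerup above finds the RunVaspCustodian task in a Firework's task list and inserts a new task directly before it, falling back to index 2 when the custodian task is absent. A self-contained sketch of that insertion pattern using placeholder task names instead of real FireWorks objects:

CUSTODIAN_NAME = "{{atomate.vasp.firetasks.run_calc.RunVaspCustodian}}"
tasks = ["WriteVaspFromIOSet", CUSTODIAN_NAME, "PassCalcLocs"]    # hypothetical task list

insert_i = 2                              # default insertion point, as in the powerups
for i, task_name in enumerate(tasks):
    if task_name == CUSTODIAN_NAME:
        insert_i = i
        break
tasks.insert(insert_i, "PreviousStructureTask")
print(tasks)                              # the new task now sits directly before the custodian task
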
6 changes: 6 additions & 0 deletions src/mpmorph/flows/md_flow.py
@@ -14,6 +14,10 @@
M3GNET_MD_CONVERGED_VOL_FLOW = "M3GNET_MD_CONVERGED_VOL_FLOW"
LAMMPS_VOL_FLOW = "LAMMPS_VOL_FLOW"


# def get_md_temperature_sweeping(structure, temp, steps, converge_first = True, initial_vol_scale = 1, **input_kwargs):


def get_md_flow_m3gnet(structure, temp, steps, converge_first = True, initial_vol_scale = 1, **input_kwargs):
inputs = M3GNetMDInputs(
temperature=temp,
@@ -91,3 +95,5 @@ def _get_converge_flow(structure: Structure, pv_md_maker: PVFromCalc, production
flow = Flow([equil_vol_job, final_md_job], output=final_md_job.output, name=M3GNET_MD_CONVERGED_VOL_FLOW)

return flow


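
A hedged usage sketch for get_md_flow_m3gnet as its signature appears in this diff; the NaCl structure and the local jobflow runner are illustrative assumptions rather than mpmorph conventions:

from pymatgen.core import Lattice, Structure
from mpmorph.flows.md_flow import get_md_flow_m3gnet

structure = Structure(Lattice.cubic(4.2), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
flow = get_md_flow_m3gnet(structure, temp=1500, steps=2000, converge_first=True)

# the resulting jobflow Flow could then be run with any jobflow manager, e.g.:
# from jobflow.managers.local import run_locally
# run_locally(flow)
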
33 changes: 17 additions & 16 deletions src/mpmorph/flows/vt_flow.py
@@ -8,6 +8,7 @@

VOLUME_TEMPERATURE_SWEEP = "VOLUME_TEMPERATURE_SWEEP"


def get_vt_sweep_flow(
structure,
lower_bound=100,
@@ -16,35 +17,34 @@ def get_vt_sweep_flow(
output_name="vt.out",
steps=2000,
):

vs = []
volume_jobs = []
temps = list(range(lower_bound, upper_bound, temp_step))

for temp in temps:
job = get_equil_vol_flow(
structure=structure,
temp=temp,
steps=steps
)
job = get_equil_vol_flow(structure=structure, temp=temp, steps=steps)
volume_jobs.append(job)
vs.append(job.output.volume)

collect_job = _collect_vt_results(vs, temps, structure, output_name)

new_flow = Flow([*volume_jobs, collect_job], output=collect_job.output, name=VOLUME_TEMPERATURE_SWEEP)
new_flow = Flow(
[*volume_jobs, collect_job],
output=collect_job.output,
name=VOLUME_TEMPERATURE_SWEEP,
)
return new_flow


def get_vt_sweep_flow_lammps(
structure,
lower_bound=100,
upper_bound=1100,
temp_step=100,
output_name="vt.out",
steps=2000,
mp_id=None
mp_id=None,
):

v_outputs = []
volume_jobs = []
temps = list(range(lower_bound, upper_bound, temp_step))
@@ -60,9 +60,10 @@ def get_vt_sweep_flow_lammps(

collect_job = _collect_vt_results(v_outputs, temps, structure, output_name, mp_id)


flow_name = f'{structure.composition.reduced_formula}-Melting Point'
new_flow = Flow([*volume_jobs, collect_job], output=collect_job.output, name=flow_name)
flow_name = f"{structure.composition.reduced_formula}-Melting Point"
new_flow = Flow(
[*volume_jobs, collect_job], output=collect_job.output, name=flow_name
)
return new_flow


@@ -75,18 +76,18 @@ def _collect_vt_results(v_outputs, ts, structure, output_fn, mp_id):
"mp_id": mp_id,
"reduced_formula": structure.composition.reduced_formula,
"formula": structure.composition.formula,
"uuid": str(uuid.uuid4())
"uuid": str(uuid.uuid4()),
}

with open(output_fn, "+w") as f:
f.write(json.dumps(result))
return result

def get_converged_vol(v_output):

def get_converged_vol(v_output):
df = pd.DataFrame.from_dict(v_output)
total_steps = (len(df) - 1) * 10
avging_window = int(total_steps / 30)
vols = df.iloc[-avging_window::]['vol']
vols = df.iloc[-avging_window::]["vol"]
eq_vol = vols.values.mean()
return float(eq_vol)

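
A hedged usage sketch of get_vt_sweep_flow with the signature shown above; the copper cell and the temperature window are illustrative only:

from pymatgen.core import Lattice, Structure
from mpmorph.flows.vt_flow import get_vt_sweep_flow

structure = Structure(Lattice.cubic(3.6), ["Cu"], [[0, 0, 0]])
flow = get_vt_sweep_flow(
    structure,
    lower_bound=800,         # K
    upper_bound=2000,        # K
    temp_step=200,           # one equilibrium-volume job per temperature
    output_name="vt.out",
    steps=2000,
)
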
8 changes: 4 additions & 4 deletions src/mpmorph/io.py
@@ -17,13 +17,13 @@ def get_string_from_struct(
):
format_str = "{{:.{0}f}}".format(significant_figures)

for (si, structure) in enumerate(structures):
for si, structure in enumerate(structures):
lines = [system, "1.0", str(structure.lattice)]
lines.append(" ".join(self.get_site_symbols(structure)))
lines.append(" ".join([str(x) for x in self.get_natoms(structure)]))

lines.append("Direct configuration= " + str(si + 1))
for (i, site) in enumerate(structure):
for i, site in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
line += " " + site.species_string
@@ -72,9 +72,9 @@ def get_string(self, system="unknown system", significant_figures=6):
# positions = np.add(self.trajectory[0].frac_coords, self.trajectory.displacements)
atoms = [site.specie.symbol for site in self.trajectory[0]]

for (si, position_array) in enumerate(positions):
for si, position_array in enumerate(positions):
lines.append("Direct configuration= " + str(si + 1))
for (i, coords) in enumerate(position_array):
for i, coords in enumerate(position_array):
line = " ".join([format_str.format(c) for c in coords])
line += " " + atoms[i]
lines.append(line)
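
A minimal standalone sketch of the per-frame block the loops above assemble (XDATCAR-style "Direct configuration=" sections); the coordinates and species are made up:

format_str = "{{:.{0}f}}".format(6)                      # significant_figures = 6 -> "{:.6f}"
frame_coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]        # fractional coordinates for one frame
atoms = ["Na", "Cl"]

lines = ["Direct configuration= 1"]
for i, coords in enumerate(frame_coords):
    lines.append(" ".join(format_str.format(c) for c in coords) + " " + atoms[i])
print("\n".join(lines))
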
1 change: 0 additions & 1 deletion src/mpmorph/jobs/equilibrate_volume.py
@@ -29,7 +29,6 @@ class EquilibriumVolumeSearchMaker(Maker):
def make(
self, original_structure: Structure, md_pv_data_docs: List[MDPVDataDoc] = None
):

if md_pv_data_docs is not None and len(md_pv_data_docs) > MAX_MD_JOBS:
raise RuntimeError(
"Maximum number of jobs for equilibrium volume search exceeded"