
Commit

Merge pull request #94 from usnistgov/develop
Develop
knc6 authored Apr 2, 2023
2 parents 736ec73 + 93457c4 commit 074c0b8
Showing 6 changed files with 79 additions and 28 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/main.yml
@@ -33,14 +33,14 @@ jobs:
export DGLBACKEND=pytorch
export CUDA_VISIBLE_DEVICES="-1"
#pip install dgl-cu111
pip install flake8 pytest pycodestyle pydocstyle codecov pytest-cov coverage
pip install flake8 pytest pycodestyle pydocstyle codecov pytest-cov coverage
#pip uninstall -y torch nvidia-cublas-cu11 nvidia-cuda-nvrtc-cu11 nvidia-cuda-runtime-cu11 nvidia-cudnn-cu11
#conda install -y pytorch-cpu
#pip install attrs==22.1.0 certifi==2022.9.24 charset-normalizer==2.1.1 codecov==2.1.12 contourpy==1.0.5 coverage==6.5.0 cycler==0.11.0 dgl==0.9.1 flake8==5.0.4 fonttools==4.38.0 idna==3.4 iniconfig==1.1.1 jarvis-tools==2022.9.16 joblib==1.2.0 kiwisolver==1.4.4 matplotlib==3.6.1 mccabe==0.7.0 networkx==3.0b1 numpy==1.23.4 packaging==21.3 pandas==1.5.1 Pillow==9.2.0 pluggy==1.0.0 psutil==5.9.3 py==1.11.0 pycodestyle==2.9.1 pydantic==1.10.2 pydocstyle==6.1.1 pyflakes==2.5.0 pyparsing==2.4.7 pytest==7.1.3 pytest-cov==4.0.0 python-dateutil==2.8.2 pytorch-ignite==0.5.0.dev20221024 pytz==2022.5 requests==2.28.1 scikit-learn==1.1.2 scipy==1.9.3 six==1.16.0 snowballstemmer==2.2.0 spglib==2.0.1 threadpoolctl==3.1.0 tomli==2.0.1 toolz==0.12.0 torch==1.12.1 tqdm==4.64.1 typing_extensions==4.4.0 urllib3==1.26.12 xmltodict==0.13.0
echo 'PIP freeze'
pip freeze
coverage run -m pytest
coverage report -m
coverage report -m -i
codecov
codecov --token="85bd9c5d-9e55-4f6d-bd69-350ee5e3bb41"
echo 'Train folder'
46 changes: 43 additions & 3 deletions README.md
@@ -21,7 +21,7 @@
* [Pre-trained models](#pretrained)
* [Quick start using colab](#colab)
* [JARVIS-ALIGNN webapp](#webapp)
* [ALIGNN-FF](#alignnff)
* [ALIGNN-FF & ASE Calculator](#alignnff)
* [Performances on a few datasets](#performances)
* [Useful notes](#notes)
* [References](#refs)
@@ -166,9 +166,44 @@ A basic web-app for direct prediction is available at [JARVIS-ALIGNN app](https:
ALIGNN-FF
-------------------------

[ASE calculator](https://wiki.fysik.dtu.dk/ase/ase/calculators/calculators.html) provides an interface to various codes. An example for ALIGNN-FF is given below:

```
from ase import Atom, Atoms
import matplotlib.pyplot as plt
import numpy as np

from alignn.ff.ff import AlignnAtomwiseCalculator, default_path

# Load the default pre-trained ALIGNN-FF model as an ASE calculator
model_path = default_path()
calc = AlignnAtomwiseCalculator(path=model_path)

# Scan the fcc Cu lattice constant and record the predicted total energy
lattice_params = np.linspace(3.5, 3.8)
fcc_energies = []
for a in lattice_params:
    atoms = Atoms(
        [Atom("Cu", (0, 0, 0))],
        cell=0.5 * a * np.array([[1.0, 1.0, 0.0],
                                 [0.0, 1.0, 1.0],
                                 [1.0, 0.0, 1.0]]),
        pbc=True,
    )
    atoms.set_tags(np.ones(len(atoms)))
    atoms.calc = calc
    fcc_energies.append(atoms.get_potential_energy())

plt.plot(lattice_params, fcc_energies)
plt.title("1x1x1")
plt.xlabel(r"Lattice constant ($\AA$)")
plt.ylabel("Total energy (eV)")
plt.show()
```
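
As an editorial illustration (not part of the original README), the same calculator also plugs into ASE's standard optimizers. The sketch below reuses `AlignnAtomwiseCalculator` and `default_path()` from the example above; the supercell size, rattle amplitude, and `fmax` threshold are arbitrary illustrative choices:

```
from ase.build import bulk
from ase.optimize import BFGS
import numpy as np

from alignn.ff.ff import AlignnAtomwiseCalculator, default_path

# Build a small rattled Cu supercell so the relaxation has something to do
atoms = bulk("Cu", "fcc", a=3.6, cubic=True) * (2, 2, 2)
atoms.rattle(stdev=0.05, seed=0)
atoms.set_tags(np.ones(len(atoms)))  # mirror the tag setup used above

atoms.calc = AlignnAtomwiseCalculator(path=default_path())

# Relax atomic positions with BFGS until forces fall below the chosen threshold
opt = BFGS(atoms)
opt.run(fmax=0.05)
print("Relaxed energy (eV):", atoms.get_potential_energy())
```

For command-line relaxation and related workflows the repository also provides the `run_alignn_ff.py` script, whose help output is referenced further below.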

To train ALIGNN-FF, use the `train_folder_ff.py` script, which uses the `atomwise_alignn` model:

An atomwise prediction example follows a setup similar to the one above, but instead of an `id_prop.csv` file it requires an `id_prop.json` file (see the example in the `sample_data_ff` directory):

```
train_folder_ff.py --root_dir "alignn/examples/sample_data_ff" --config "alignn/examples/sample_data_ff/config_example_atomwise.json" --output_dir=temp
```

@@ -189,6 +224,7 @@
```
run_alignn_ff.py -h
```



<a name="performances"></a>

Performances
@@ -307,7 +343,11 @@ coming soon!

### 10) On OpenCatalyst dataset

coming soon!
[On 10k dataset](https://github.com/Open-Catalyst-Project/ocp/blob/main/MODELS.md#is2re-models):

CGCNN MAE: 0.988

ALIGNN MAE: 0.61


<a name="notes"></a>
2 changes: 1 addition & 1 deletion alignn/__init__.py
@@ -1,2 +1,2 @@
"""Version number."""
__version__ = "2023.01.10"
__version__ = "2023.04.01"
12 changes: 12 additions & 0 deletions alignn/pretrained.py
@@ -102,6 +102,18 @@
"https://figshare.com/ndownloader/files/38789199",
1,
],
"jv_supercon_edos_alignn": [
"https://figshare.com/ndownloader/files/39946300",
1,
],
"jv_supercon_debye_alignn": [
"https://figshare.com/ndownloader/files/39946297",
1,
],
"jv_supercon_a2F_alignn": [
"https://figshare.com/ndownloader/files/38801886",
100,
],
"mp_e_form_alignnn": [
"https://figshare.com/ndownloader/files/31458811",
1,
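
For context (an editorial note, not part of the diff): these entries map model names to their figshare downloads, which the `pretrained.py` command-line interface resolves at run time. A hedged sketch of querying one of the newly added models is shown below; the flag names follow the `pretrained.py` usage documented in the repository's README, and the POSCAR path is assumed to be the bundled example structure:

```
pretrained.py --model_name jv_supercon_debye_alignn --file_format poscar --file_path alignn/examples/sample_data/POSCAR-JVASP-10.vasp
```
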
37 changes: 18 additions & 19 deletions alignn/train.py
@@ -1012,26 +1012,25 @@ def es_score(engine):
mean_absolute_error(np.array(targets), np.array(predictions)),
)
if config.store_outputs and not classification:
x = []
y = []
for i in history["EOS"]:
x.append(i[0].cpu().numpy().tolist())
y.append(i[1].cpu().numpy().tolist())
x = np.array(x, dtype="float").flatten()
y = np.array(y, dtype="float").flatten()
f = open(
os.path.join(
config.output_dir, "prediction_results_train_set.csv"
),
"w",
)
# save training targets and predictions here
# TODO: Add IDs
f.write("target,prediction\n")
for i, j in zip(x, y):
f.write("%6f, %6f\n" % (j, i))
line = str(i) + "," + str(j) + "\n"
f.write(line)
f.close()
resultsfile = os.path.join(
config.output_dir, "prediction_results_train_set.csv"
)

target_vals, predictions = [], []

for tgt, pred in history["trainEOS"]:
target_vals.append(tgt.cpu().numpy().tolist())
predictions.append(pred.cpu().numpy().tolist())

target_vals = np.array(target_vals, dtype="float").flatten()
predictions = np.array(predictions, dtype="float").flatten()

with open(resultsfile, "w") as f:
print("target,prediction", file=f)
for target_val, predicted_val in zip(target_vals, predictions):
print(f"{target_val}, {predicted_val}", file=f)

# TODO: Fix IDs for train loader
"""
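
An editorial aside (not part of the commit): the rewritten block above writes `prediction_results_train_set.csv` with a `target,prediction` header, so the training-set error can be spot-checked afterwards. A minimal sketch, assuming the output directory was set to `temp` as in the training example earlier:

```
import numpy as np
import pandas as pd

# Columns follow the "target,prediction" header written by train.py
df = pd.read_csv("temp/prediction_results_train_set.csv", skipinitialspace=True)
print("Train-set MAE:", np.mean(np.abs(df["target"] - df["prediction"])))
```
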
6 changes: 3 additions & 3 deletions setup.py
@@ -10,21 +10,21 @@

setuptools.setup(
name="alignn",
version="2023.01.10",
version="2023.04.01",
author="Kamal Choudhary, Brian DeCost",
author_email="[email protected]",
description="alignn",
install_requires=[
"numpy>=1.19.5",
"scipy>=1.6.1",
"jarvis-tools>=2021.07.19",
"torch==1.12.0",
"torch>=1.8",
"dgl>=0.6.0",
"scikit-learn>=0.22.2",
"matplotlib>=3.4.1",
"tqdm>=4.60.0",
"pandas>=1.2.3",
"pytorch-ignite==0.5.0.dev20221024",
"pytorch-ignite>=0.5.0.dev20221024",
"pydantic>=1.8.1",
"flake8>=3.9.1",
"pycodestyle>=2.7.0",
