Merge branch 'mir-group:main' into stratified_metrics
kavanase authored Jul 10, 2024
2 parents 2ae5d7f + d3a7763 commit 1059248
Showing 7 changed files with 30 additions and 19 deletions.
1 change: 1 addition & 0 deletions .github/workflows/tests.yml
@@ -31,6 +31,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         pip install setuptools wheel
+        if [ ${TORCH} = "1.13.1" ]; then pip install numpy==1.*; fi # older torch versions fail with numpy 2
         pip install torch==${TORCH} -f https://download.pytorch.org/whl/cpu/torch_stable.html
         pip install h5py scikit-learn # install packages that aren't required dependencies but that the tests do need
         pip install --upgrade-strategy only-if-needed .
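For context, a minimal sketch (not part of the CI change) of the incompatibility this line guards against: torch 1.13.1 wheels were built before numpy 2, so combining the two can break at import time. The runtime check below merely mirrors the workflow's conditional pin.

import numpy as np
import torch

# Illustrative guard only: fail fast when an old torch build meets numpy 2,
# echoing the `pip install numpy==1.*` pin in the workflow above.
if torch.__version__.startswith("1.13.") and int(np.__version__.split(".")[0]) >= 2:
    raise RuntimeError(
        "torch 1.13.* predates numpy 2; pin with `pip install 'numpy==1.*'`"
    )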
5 changes: 4 additions & 1 deletion CHANGELOG.md
@@ -7,7 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 Most recent change on the bottom.
 
 
-## Unreleased - 0.6.1
+## Unreleased
+
+
+## [0.6.1] - 2024-7-9
 ### Added
 - add support for equivariance testing of arbitrary Cartesian tensor outputs
 - [Breaking] use entry points for `nequip.extension`s (e.g. for field registration)
10 changes: 4 additions & 6 deletions nequip/__init__.py
@@ -11,16 +11,14 @@
 torch_version = packaging.version.parse(torch.__version__)
 
 # only allow 1.11*, 1.13* or higher (no 1.12.*)
-assert (torch_version > packaging.version.parse("1.11.0")) and not (
-    packaging.version.parse("1.12.0")
-    <= torch_version
-    < packaging.version.parse("1.13.0")
+assert (torch_version == packaging.version.parse("1.11")) or (
+    torch_version >= packaging.version.parse("1.13")
 ), f"NequIP supports PyTorch 1.11.* or 1.13.* or later, but {torch_version} found"
 
 # warn if using 1.13* or 2.0.*
-if packaging.version.parse("1.13.0") <= torch_version < packaging.version.parse("2.1"):
+if packaging.version.parse("1.13.0") <= torch_version:
     warnings.warn(
-        f"!! PyTorch version {torch_version} found. Upstream issues in PyTorch versions 1.13.* and 2.0.* have been seen to cause unusual performance degradations on some CUDA systems that become worse over time; see https://github.com/mir-group/nequip/discussions/311. The best tested PyTorch version to use with CUDA devices is 1.11; while using other versions, if you observe this problem, an unexpected lack of this problem, or other strange behavior, please post in the linked GitHub issue."
+        f"!! PyTorch version {torch_version} found. Upstream issues in PyTorch versions 1.13.* and 2.* have been seen to cause unusual performance degradations on some CUDA systems that become worse over time; see https://github.com/mir-group/nequip/discussions/311. The best tested PyTorch version to use with CUDA devices is 1.11; while using other versions, if you observe this problem, an unexpected lack of this problem, or other strange behavior, please post in the linked GitHub issue."
     )
 
 
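The rewritten gate is easy to check in isolation. A standalone sketch (the helper name is made up for illustration) of how packaging.version evaluates the new condition; note that parse("1.11") == parse("1.11.0") under PEP 440, since trailing zero release segments are ignored in comparisons:

import packaging.version

def torch_version_allowed(version_string: str) -> bool:
    # Same condition as the new assert: exactly the 1.11 release, or 1.13 and later.
    v = packaging.version.parse(version_string)
    return v == packaging.version.parse("1.11") or v >= packaging.version.parse("1.13")

assert torch_version_allowed("1.11.0")      # "1.11.0" compares equal to "1.11"
assert not torch_version_allowed("1.12.1")  # the excluded 1.12.* range
assert torch_version_allowed("2.0.1")       # anything from 1.13 on passes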
2 changes: 1 addition & 1 deletion nequip/_version.py
@@ -2,4 +2,4 @@
 # See Python packaging guide
 # https://packaging.python.org/guides/single-sourcing-package-version/
 
-__version__ = "0.6.0"
+__version__ = "0.6.1"
14 changes: 12 additions & 2 deletions nequip/utils/test.py
@@ -2,7 +2,7 @@
 
 import torch
 from e3nn import o3
-from e3nn.util.test import equivariance_error, FLOAT_TOLERANCE
+from e3nn.util.test import equivariance_error
 
 from nequip.nn import GraphModuleMixin, GraphModel
 from nequip.data import (
@@ -12,7 +12,17 @@
     _EDGE_FIELDS,
     _CARTESIAN_TENSOR_FIELDS,
 )
-
+from nequip.utils.misc import dtype_from_name
+
+# The default float tolerance
+FLOAT_TOLERANCE = {
+    t: torch.as_tensor(v, dtype=dtype_from_name(t))
+    for t, v in {"float32": 1e-3, "float64": 1e-10}.items()
+}
+# Allow lookup by name or dtype object:
+for t, v in list(FLOAT_TOLERANCE.items()):
+    FLOAT_TOLERANCE[dtype_from_name(t)] = v
+del t, v
 
 # This has to be somewhat large because of float32 sum reductions over many edges/atoms
 PERMUTATION_FLOAT_TOLERANCE = {torch.float32: 1e-4, torch.float64: 1e-10}
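The comment about float32 sum reductions is easy to demonstrate: rounding error accumulates with the number of summed terms, which is why the float32 entries above are orders of magnitude looser than the float64 ones. A standalone sketch:

import torch

torch.manual_seed(0)
x = torch.rand(1_000_000)              # stand-in for a sum over many edges/atoms
float32_sum = x.sum().item()           # single-precision reduction
float64_sum = x.double().sum().item()  # double-precision reference
# The gap is nonzero and grows with the size of the reduction.
print(abs(float32_sum - float64_sum))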
9 changes: 1 addition & 8 deletions nequip/utils/unittests/conftest.py
@@ -12,12 +12,11 @@
 
 import torch
 
-from nequip.utils.test import set_irreps_debug
+from nequip.utils.test import set_irreps_debug, FLOAT_TOLERANCE
 from nequip.data import AtomicData, ASEDataset
 from nequip.data.transforms import TypeMapper
 from nequip.utils.torch_geometric import Batch
 from nequip.utils._global_options import _set_global_options
-from nequip.utils.misc import dtype_from_name
 
 # Sometimes we run parallel using pytest-xdist, and want to be able to use
 # as many GPUs as are available
@@ -42,12 +41,6 @@
 # Test parallelization, but don't waste time spawning tons of workers if lots of cores available
 os.environ["NEQUIP_NUM_TASKS"] = "2"
 
-# The default float tolerance
-FLOAT_TOLERANCE = {
-    t: torch.as_tensor(v, dtype=dtype_from_name(t))
-    for t, v in {"float32": 1e-3, "float64": 1e-10}.items()
-}
-
 
 @pytest.fixture(scope="session", autouse=True, params=["float32", "float64"])
 def float_tolerance(request):
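For reference, a plausible shape for the float_tolerance fixture that consumes the now-centralized table; its body is not shown in this diff, so the code below is a guess at the pattern, not the project's actual fixture:

import pytest
import torch

from nequip.utils.test import FLOAT_TOLERANCE

@pytest.fixture(scope="session", autouse=True, params=["float32", "float64"])
def float_tolerance(request):
    # Hypothetical body: run the suite once per default dtype and hand each
    # test the matching tolerance from the shared table.
    old_dtype = torch.get_default_dtype()
    torch.set_default_dtype(getattr(torch, request.param))
    yield FLOAT_TOLERANCE[request.param]
    torch.set_default_dtype(old_dtype)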
8 changes: 7 additions & 1 deletion nequip/utils/unittests/model_tests.py
@@ -228,7 +228,13 @@ def test_equivariance(self, model, atomic_batch, device):
         instance, out_fields = model
         instance = instance.to(device=device)
         atomic_batch = atomic_batch.to(device=device)
-        assert_AtomicData_equivariant(func=instance, data_in=atomic_batch)
+        assert_AtomicData_equivariant(
+            func=instance,
+            data_in=atomic_batch,
+            e3_tolerance={torch.float32: 1e-3, torch.float64: 1e-8}[
+                torch.get_default_dtype()
+            ],
+        )
 
     def test_embedding_cutoff(self, model, config, device):
         instance, out_fields = model
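The new call selects its tolerance by indexing a dtype-keyed dict with the active default dtype. The idiom in isolation, as a standalone sketch:

import torch

# Looser bound under float32, tighter under float64, keyed by the dtype
# object that torch.get_default_dtype() returns.
e3_tolerance = {torch.float32: 1e-3, torch.float64: 1e-8}[torch.get_default_dtype()]
print(e3_tolerance)  # 0.001 unless the default dtype has been set to float64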
