From 0f7bce400ee461606588f2b6cf777bd620a990cc Mon Sep 17 00:00:00 2001
From: Agriya Khetarpal <74401230+agriyakhetarpal@users.noreply.github.com>
Date: Tue, 27 Aug 2024 00:21:54 +0530
Subject: [PATCH] Format and sort imports, run ruff rules

---
 autograd/__init__.py | 29 +++++++++++----------
 autograd/builtins.py | 14 +++++-----
 autograd/core.py | 9 +++----
 autograd/differential_operators.py | 12 ++++-----
 autograd/extend.py | 20 +++++++-------
 autograd/misc/__init__.py | 2 +-
 autograd/misc/fixed_points.py | 4 +--
 autograd/misc/flatten.py | 2 +-
 autograd/misc/optimizers.py | 1 -
 autograd/misc/tracers.py | 7 ++---
 autograd/numpy/__init__.py | 8 +-----
 autograd/numpy/fft.py | 8 +++---
 autograd/numpy/linalg.py | 7 +++--
 autograd/numpy/numpy_boxes.py | 4 ++-
 autograd/numpy/numpy_jvps.py | 16 +++++++-----
 autograd/numpy/numpy_vjps.py | 5 +++-
 autograd/numpy/numpy_vspaces.py | 3 ++-
 autograd/numpy/numpy_wrapper.py | 9 +++----
 autograd/numpy/random.py | 1 +
 autograd/scipy/__init__.py | 5 +---
 autograd/scipy/integrate.py | 4 +--
 autograd/scipy/linalg.py | 3 ++-
 autograd/scipy/misc.py | 1 +
 autograd/scipy/signal.py | 7 ++---
 autograd/scipy/special.py | 5 ++--
 autograd/scipy/stats/__init__.py | 7 +----
 autograd/scipy/stats/beta.py | 5 ++--
 autograd/scipy/stats/chi2.py | 5 ++--
 autograd/scipy/stats/dirichlet.py | 2 +-
 autograd/scipy/stats/gamma.py | 5 ++--
 autograd/scipy/stats/multivariate_normal.py | 3 +--
 autograd/scipy/stats/norm.py | 3 ++-
 autograd/scipy/stats/poisson.py | 5 ++--
 autograd/scipy/stats/t.py | 3 ++-
 autograd/test_util.py | 11 +++----
 autograd/tracer.py | 3 ++-
 autograd/util.py | 2 --
 benchmarks/bench_core.py | 7 ++---
 benchmarks/bench_numpy_vjps.py | 3 +--
 benchmarks/bench_rnn.py | 2 +-
 benchmarks/bench_util.py | 2 +-
 examples/bayesian_neural_net.py | 3 +--
 examples/bayesian_optimization.py | 4 +--
 examples/black_box_svi.py | 1 -
 examples/convnet.py | 2 +-
 examples/data.py | 4 +--
 examples/data_mnist.py | 9 +++----
 examples/deep_gaussian_process.py | 5 ++--
 examples/define_gradient.py | 3 +--
 examples/dot_graph.py | 3 +--
 examples/fluidsim/fluidsim.py | 12 ++++-----
 examples/fluidsim/wing.py | 8 +++---
 examples/gaussian_process.py | 4 +--
 examples/generative_adversarial_net.py | 5 ++--
 examples/gmm.py | 8 +++---
 examples/gplvm.py | 7 +++--
 examples/hmm_em.py | 9 ++++---
 examples/ica.py | 5 ++--
 examples/lstm.py | 7 ++---
 examples/mixture_variational_inference.py | 3 +--
 examples/natural_gradient_black_box_svi.py | 7 +++--
 examples/negative_binomial_maxlike.py | 7 +++--
 examples/neural_net.py | 5 ++--
 examples/neural_net_regression.py | 1 -
 examples/ode_net.py | 5 ++--
 examples/print_trace.py | 2 +-
 examples/rkhs.py | 4 +--
 examples/rnn.py | 6 ++---
 examples/rosenbrock.py | 3 ++-
 examples/sinusoid.py | 3 ++-
 examples/tanh.py | 3 ++-
 examples/variational_autoencoder.py | 6 ++---
 tests/_test_complexity.py | 3 ++-
 tests/numpy_utils.py | 3 ---
 tests/profiling.py | 8 +++---
 tests/test_binary_ops.py | 7 ++---
 tests/test_complex.py | 2 +-
 tests/test_core.py | 1 +
 tests/test_dict.py | 8 +++---
 tests/test_direct.py | 5 ++--
 tests/test_fft.py | 5 ++--
 tests/test_graphs.py | 8 +++---
 tests/test_jacobian.py | 2 +-
 tests/test_linalg.py | 10 +++----
 tests/test_list.py | 5 ++--
 tests/test_logic.py | 8 +++---
 tests/test_misc.py | 4 +--
 tests/test_numpy.py | 5 ++--
 tests/test_scalar_ops.py | 2 +-
 tests/test_scipy.py | 15 ++++++-----
 tests/test_systematic.py | 8 +++---
 tests/test_tests.py | 5 ++--
 tests/test_truediv.py | 4 +--
 tests/test_tuple.py | 5 ++--
 tests/test_vspaces.py | 7 ++---
 tests/test_wrappers.py | 27 +++++++++----------
 96 files changed, 288 insertions(+), 277 deletions(-)

diff --git a/autograd/__init__.py b/autograd/__init__.py
index 3e20e914c..c177927d1 100644
--- a/autograd/__init__.py
+++ b/autograd/__init__.py
@@ -1,23 +1,24 @@
+from autograd.core import primitive_with_deprecation_warnings as primitive
+
+from .builtins import dict, isinstance, list, tuple, type
 from .differential_operators import (
-    make_vjp,
-    grad,
-    multigrad_dict,
+    checkpoint,
+    deriv,
     elementwise_grad,
-    value_and_grad,
+    grad,
     grad_and_aux,
+    grad_named,
+    hessian,
     hessian_tensor_product,
     hessian_vector_product,
-    hessian,
+    holomorphic_grad,
     jacobian,
-    tensor_jacobian_product,
-    vector_jacobian_product,
-    grad_named,
-    checkpoint,
+    make_ggnvp,
     make_hvp,
     make_jvp,
-    make_ggnvp,
-    deriv,
-    holomorphic_grad,
+    make_vjp,
+    multigrad_dict,
+    tensor_jacobian_product,
+    value_and_grad,
+    vector_jacobian_product,
 )
-from .builtins import isinstance, type, tuple, list, dict
-from autograd.core import primitive_with_deprecation_warnings as primitive
diff --git a/autograd/builtins.py b/autograd/builtins.py
index 223e92217..e62d8457c 100644
--- a/autograd/builtins.py
+++ b/autograd/builtins.py
@@ -1,16 +1,16 @@
-from .util import subvals
 from .extend import (
     Box,
-    primitive,
-    notrace_primitive,
-    VSpace,
-    vspace,
     SparseObject,
-    defvjp,
-    defvjp_argnum,
+    VSpace,
     defjvp,
     defjvp_argnum,
+    defvjp,
+    defvjp_argnum,
+    notrace_primitive,
+    primitive,
+    vspace,
 )
+from .util import subvals

 isinstance_ = isinstance
 isinstance = notrace_primitive(isinstance)
diff --git a/autograd/core.py b/autograd/core.py
index 3accbcf3c..e3740b802 100644
--- a/autograd/core.py
+++ b/autograd/core.py
@@ -1,6 +1,7 @@
-from itertools import count
 from functools import reduce
-from .tracer import trace, primitive, toposort, Node, Box, isbox, getval
+from itertools import count
+
+from .tracer import Box, Node, getval, isbox, primitive, toposort, trace
 from .util import func, subval

 # -------------------- reverse mode --------------------
@@ -40,9 +41,7 @@ def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
             vjpmaker = primitive_vjps[fun]
         except KeyError:
             fun_name = getattr(fun, "__name__", fun)
-            raise NotImplementedError(
-                f"VJP of {fun_name} wrt argnums {parent_argnums} not defined"
-            )
+            raise NotImplementedError(f"VJP of {fun_name} wrt argnums {parent_argnums} not defined")
         self.vjp = vjpmaker(parent_argnums, value, args, kwargs)

     def initialize_root(self):
diff --git a/autograd/differential_operators.py b/autograd/differential_operators.py
index 7107571df..21c35eed6 100644
--- a/autograd/differential_operators.py
+++ b/autograd/differential_operators.py
@@ -1,6 +1,5 @@
 """Convenience functions built on top of `make_vjp`."""

-from functools import partial
 from collections import OrderedDict

 try:
@@ -9,13 +8,14 @@
     from inspect import getargspec as _getargspec  # Python 2
 import warnings

-from .wrap_util import unary_to_nary
-from .builtins import tuple as atuple
-from .core import make_vjp as _make_vjp, make_jvp as _make_jvp
-from .extend import primitive, defvjp_argnum, vspace
-
 import autograd.numpy as np

+from .builtins import tuple as atuple
+from .core import make_jvp as _make_jvp
+from .core import make_vjp as _make_vjp
+from .extend import defvjp_argnum, primitive, vspace
+from .wrap_util import unary_to_nary
+
 make_vjp = unary_to_nary(_make_vjp)
 make_jvp = unary_to_nary(_make_jvp)
diff --git a/autograd/extend.py b/autograd/extend.py
index 9cc4eee95..7c0638ee1 100644
--- a/autograd/extend.py
+++ b/autograd/extend.py
@@ -1,16 +1,16 @@
 # Exposes API for extending autograd
-from .tracer import Box, primitive, register_notrace, notrace_primitive
 from .core import (
+    JVPNode,
     SparseObject,
-    VSpace,
-    vspace,
     VJPNode,
-    JVPNode,
-    defvjp_argnums,
-    defvjp_argnum,
-    defvjp,
-    defjvp_argnums,
-    defjvp_argnum,
-    defjvp,
+    VSpace,
     def_linear,
+    defjvp,
+    defjvp_argnum,
+    defjvp_argnums,
+    defvjp,
+    defvjp_argnum,
+    defvjp_argnums,
+    vspace,
 )
+from .tracer import Box, notrace_primitive, primitive, register_notrace
diff --git a/autograd/misc/__init__.py b/autograd/misc/__init__.py
index b455b460c..4f77dbf48 100644
--- a/autograd/misc/__init__.py
+++ b/autograd/misc/__init__.py
@@ -1,2 +1,2 @@
-from .tracers import const_graph
 from .flatten import flatten
+from .tracers import const_graph
diff --git a/autograd/misc/fixed_points.py b/autograd/misc/fixed_points.py
index 2383b2d79..aab87a2da 100644
--- a/autograd/misc/fixed_points.py
+++ b/autograd/misc/fixed_points.py
@@ -1,6 +1,6 @@
-from autograd.extend import primitive, defvjp, vspace
-from autograd.builtins import tuple
 from autograd import make_vjp
+from autograd.builtins import tuple
+from autograd.extend import defvjp, primitive, vspace


 @primitive
diff --git a/autograd/misc/flatten.py b/autograd/misc/flatten.py
index 09b4d5670..7a600d7a3 100644
--- a/autograd/misc/flatten.py
+++ b/autograd/misc/flatten.py
@@ -3,9 +3,9 @@
 arrays. The main purpose is to make examples and optimizers simpler.
 """

+import autograd.numpy as np
 from autograd import make_vjp
 from autograd.builtins import type
-import autograd.numpy as np


 def flatten(value):
diff --git a/autograd/misc/optimizers.py b/autograd/misc/optimizers.py
index 16bce6c0e..d2ee2d70d 100644
--- a/autograd/misc/optimizers.py
+++ b/autograd/misc/optimizers.py
@@ -7,7 +7,6 @@
 These routines can optimize functions whose inputs
 are structured objects, such as dicts of numpy arrays."""

-
 import autograd.numpy as np
 from autograd.misc import flatten
 from autograd.wrap_util import wraps
diff --git a/autograd/misc/tracers.py b/autograd/misc/tracers.py
index fe4af4d1e..20dd555fd 100644
--- a/autograd/misc/tracers.py
+++ b/autograd/misc/tracers.py
@@ -1,8 +1,9 @@
+from functools import partial
 from itertools import repeat
-from autograd.wrap_util import wraps
+
+from autograd.tracer import Node, trace
 from autograd.util import subvals, toposort
-from autograd.tracer import trace, Node
-from functools import partial
+from autograd.wrap_util import wraps


 class ConstGraphNode(Node):
diff --git a/autograd/numpy/__init__.py b/autograd/numpy/__init__.py
index 7e5fc37c0..28b3f7a84 100644
--- a/autograd/numpy/__init__.py
+++ b/autograd/numpy/__init__.py
@@ -1,8 +1,2 @@
+from . import fft, linalg, numpy_boxes, numpy_jvps, numpy_vjps, numpy_vspaces, random
 from .numpy_wrapper import *
-from . import numpy_boxes
-from . import numpy_vspaces
-from . import numpy_vjps
-from . import numpy_jvps
-from . import linalg
-from . import fft
-from . import random
diff --git a/autograd/numpy/fft.py b/autograd/numpy/fft.py
index b5917b266..e401b4cc8 100644
--- a/autograd/numpy/fft.py
+++ b/autograd/numpy/fft.py
@@ -1,8 +1,10 @@
 import numpy.fft as ffto
-from .numpy_wrapper import wrap_namespace
-from .numpy_vjps import match_complex
+
+from autograd.extend import defvjp, primitive, vspace
+
 from . import numpy_wrapper as anp
-from autograd.extend import primitive, defvjp, vspace
+from .numpy_vjps import match_complex
+from .numpy_wrapper import wrap_namespace

 wrap_namespace(ffto.__dict__, globals())
diff --git a/autograd/numpy/linalg.py b/autograd/numpy/linalg.py
index e76fb1447..c69199aa1 100644
--- a/autograd/numpy/linalg.py
+++ b/autograd/numpy/linalg.py
@@ -1,8 +1,11 @@
 from functools import partial
+
 import numpy.linalg as npla
-from .numpy_wrapper import wrap_namespace
+
+from autograd.extend import defjvp, defvjp
+
 from . import numpy_wrapper as anp
-from autograd.extend import defvjp, defjvp
+from .numpy_wrapper import wrap_namespace

 wrap_namespace(npla.__dict__, globals())
diff --git a/autograd/numpy/numpy_boxes.py b/autograd/numpy/numpy_boxes.py
index afe87c3ab..937037a42 100644
--- a/autograd/numpy/numpy_boxes.py
+++ b/autograd/numpy/numpy_boxes.py
@@ -1,6 +1,8 @@
 import numpy as np
-from autograd.extend import Box, primitive
+
 from autograd.builtins import SequenceBox
+from autograd.extend import Box, primitive
+
 from . import numpy_wrapper as anp

 Box.__array_priority__ = 90.0
diff --git a/autograd/numpy/numpy_jvps.py b/autograd/numpy/numpy_jvps.py
index 2b0e8754b..4dba485be 100644
--- a/autograd/numpy/numpy_jvps.py
+++ b/autograd/numpy/numpy_jvps.py
@@ -1,19 +1,21 @@
 import numpy as onp
+
+from autograd.extend import JVPNode, def_linear, defjvp, defjvp_argnum, register_notrace, vspace
+
+from ..util import func
 from . import numpy_wrapper as anp
+from .numpy_boxes import ArrayBox
 from .numpy_vjps import (
-    untake,
     balanced_eq,
-    match_complex,
-    replace_zero,
     dot_adjoint_0,
     dot_adjoint_1,
+    match_complex,
+    nograd_functions,
+    replace_zero,
     tensordot_adjoint_0,
     tensordot_adjoint_1,
-    nograd_functions,
+    untake,
 )
-from autograd.extend import defjvp, defjvp_argnum, def_linear, vspace, JVPNode, register_notrace
-from ..util import func
-from .numpy_boxes import ArrayBox

 for fun in nograd_functions:
     register_notrace(JVPNode, fun)
diff --git a/autograd/numpy/numpy_vjps.py b/autograd/numpy/numpy_vjps.py
index 0481251ec..ddcfc2ce6 100644
--- a/autograd/numpy/numpy_vjps.py
+++ b/autograd/numpy/numpy_vjps.py
@@ -1,9 +1,12 @@
 from functools import partial
+
 import numpy as onp
+
+from autograd.extend import SparseObject, VJPNode, defvjp, defvjp_argnum, primitive, register_notrace, vspace
+
 from ..util import func
 from . import numpy_wrapper as anp
 from .numpy_boxes import ArrayBox
-from autograd.extend import primitive, vspace, defvjp, defvjp_argnum, SparseObject, VJPNode, register_notrace

 # ----- Non-differentiable functions -----
diff --git a/autograd/numpy/numpy_vspaces.py b/autograd/numpy/numpy_vspaces.py
index b6d49f196..c1f77f4aa 100644
--- a/autograd/numpy/numpy_vspaces.py
+++ b/autograd/numpy/numpy_vspaces.py
@@ -1,6 +1,7 @@
 import numpy as np
-from autograd.extend import VSpace
+
 from autograd.builtins import NamedTupleVSpace
+from autograd.extend import VSpace


 class ArrayVSpace(VSpace):
diff --git a/autograd/numpy/numpy_wrapper.py b/autograd/numpy/numpy_wrapper.py
index 9b8764005..baa0aed35 100644
--- a/autograd/numpy/numpy_wrapper.py
+++ b/autograd/numpy/numpy_wrapper.py
@@ -1,8 +1,9 @@
-import types
 import warnings
-from autograd.extend import primitive, notrace_primitive
+
 import numpy as _np
+
 import autograd.builtins as builtins
+from autograd.extend import notrace_primitive, primitive

 if _np.lib.NumpyVersion(_np.__version__) >= "2.0.0":
     from numpy._core.einsumfunc import _parse_einsum_input
@@ -75,9 +76,7 @@ def array(A, *args, **kwargs):
 def wrap_if_boxes_inside(raw_array, slow_op_name=None):
     if raw_array.dtype is _np.dtype("O"):
         if slow_op_name:
-            warnings.warn(
-                "{} is slow for array inputs. " "np.concatenate() is faster.".format(slow_op_name)
-            )
+            warnings.warn("{} is slow for array inputs. " "np.concatenate() is faster.".format(slow_op_name))
         return array_from_args((), {}, *raw_array.ravel()).reshape(raw_array.shape)
     else:
         return raw_array
diff --git a/autograd/numpy/random.py b/autograd/numpy/random.py
index f8097e3c0..7f78ce81a 100644
--- a/autograd/numpy/random.py
+++ b/autograd/numpy/random.py
@@ -1,4 +1,5 @@
 import numpy.random as npr
+
 from .numpy_wrapper import wrap_namespace

 wrap_namespace(npr.__dict__, globals())
diff --git a/autograd/scipy/__init__.py b/autograd/scipy/__init__.py
index ace8784e9..81273ad5c 100644
--- a/autograd/scipy/__init__.py
+++ b/autograd/scipy/__init__.py
@@ -1,7 +1,4 @@
-from . import integrate
-from . import signal
-from . import special
-from . import stats
+from . import integrate, signal, special, stats

 try:
     from . import misc
diff --git a/autograd/scipy/integrate.py b/autograd/scipy/integrate.py
index 9364f0563..401e5a971 100644
--- a/autograd/scipy/integrate.py
+++ b/autograd/scipy/integrate.py
@@ -1,10 +1,10 @@
 import scipy.integrate

 import autograd.numpy as np
-from autograd.extend import primitive, defvjp_argnums
 from autograd import make_vjp
-from autograd.misc import flatten
 from autograd.builtins import tuple
+from autograd.extend import defvjp_argnums, primitive
+from autograd.misc import flatten

 odeint = primitive(scipy.integrate.odeint)
diff --git a/autograd/scipy/linalg.py b/autograd/scipy/linalg.py
index b9f1d08e3..d2cc886a9 100644
--- a/autograd/scipy/linalg.py
+++ b/autograd/scipy/linalg.py
@@ -1,9 +1,10 @@
 from functools import partial
+
 import scipy.linalg

 import autograd.numpy as anp
+from autograd.extend import defjvp, defjvp_argnums, defvjp, defvjp_argnums
 from autograd.numpy.numpy_wrapper import wrap_namespace
-from autograd.extend import defvjp, defvjp_argnums, defjvp, defjvp_argnums

 wrap_namespace(scipy.linalg.__dict__, globals())  # populates module namespace
diff --git a/autograd/scipy/misc.py b/autograd/scipy/misc.py
index 4b75d0307..4271c30e5 100644
--- a/autograd/scipy/misc.py
+++ b/autograd/scipy/misc.py
@@ -1,4 +1,5 @@
 import scipy.misc as osp_misc
+
 from ..scipy import special

 if hasattr(osp_misc, "logsumexp"):
diff --git a/autograd/scipy/signal.py b/autograd/scipy/signal.py
index 2e94de52e..49b76194c 100644
--- a/autograd/scipy/signal.py
+++ b/autograd/scipy/signal.py
@@ -1,10 +1,11 @@
 from functools import partial

-import autograd.numpy as np
-import numpy as npo  # original numpy
-from autograd.extend import primitive, defvjp
+import numpy as npo  # original numpy
 from numpy.lib.stride_tricks import as_strided

+import autograd.numpy as np
+from autograd.extend import defvjp, primitive
+

 @primitive
 def convolve(A, B, axes=None, dot_axes=[(), ()], mode="full"):
diff --git a/autograd/scipy/special.py b/autograd/scipy/special.py
index aaa1930cd..2e435900a 100644
--- a/autograd/scipy/special.py
+++ b/autograd/scipy/special.py
@@ -1,7 +1,8 @@
 import scipy.special
+
 import autograd.numpy as np
-from autograd.extend import primitive, defvjp, defjvp
-from autograd.numpy.numpy_vjps import unbroadcast_f, repeat_to_match_shape
+from autograd.extend import defjvp, defvjp, primitive
+from autograd.numpy.numpy_vjps import repeat_to_match_shape, unbroadcast_f

 ### Beta function ###
 beta = primitive(scipy.special.beta)
diff --git a/autograd/scipy/stats/__init__.py b/autograd/scipy/stats/__init__.py
index fd02bb692..80e50cc91 100644
--- a/autograd/scipy/stats/__init__.py
+++ b/autograd/scipy/stats/__init__.py
@@ -1,9 +1,4 @@
-from . import chi2
-from . import beta
-from . import gamma
-from . import norm
-from . import poisson
-from . import t
+from . import beta, chi2, gamma, norm, poisson, t

 # Try block needed in case the user has an
 # old version of scipy without multivariate normal.
diff --git a/autograd/scipy/stats/beta.py b/autograd/scipy/stats/beta.py
index 1e09fd211..4c2716095 100644
--- a/autograd/scipy/stats/beta.py
+++ b/autograd/scipy/stats/beta.py
@@ -1,6 +1,7 @@
-import autograd.numpy as np
 import scipy.stats
-from autograd.extend import primitive, defvjp
+
+import autograd.numpy as np
+from autograd.extend import defvjp, primitive
 from autograd.numpy.numpy_vjps import unbroadcast_f
 from autograd.scipy.special import beta, psi
diff --git a/autograd/scipy/stats/chi2.py b/autograd/scipy/stats/chi2.py
index 25ff1ba79..e15b11631 100644
--- a/autograd/scipy/stats/chi2.py
+++ b/autograd/scipy/stats/chi2.py
@@ -1,6 +1,7 @@
-import autograd.numpy as np
 import scipy.stats
-from autograd.extend import primitive, defvjp
+
+import autograd.numpy as np
+from autograd.extend import defvjp, primitive
 from autograd.numpy.numpy_vjps import unbroadcast_f
 from autograd.scipy.special import gamma
diff --git a/autograd/scipy/stats/dirichlet.py b/autograd/scipy/stats/dirichlet.py
index b5ce1fccb..238ac5ade 100644
--- a/autograd/scipy/stats/dirichlet.py
+++ b/autograd/scipy/stats/dirichlet.py
@@ -1,8 +1,8 @@
 import scipy.stats

 import autograd.numpy as np
+from autograd.extend import defvjp, primitive
 from autograd.scipy.special import digamma
-from autograd.extend import primitive, defvjp

 rvs = primitive(scipy.stats.dirichlet.rvs)
 pdf = primitive(scipy.stats.dirichlet.pdf)
diff --git a/autograd/scipy/stats/gamma.py b/autograd/scipy/stats/gamma.py
index 601873697..5004905ec 100644
--- a/autograd/scipy/stats/gamma.py
+++ b/autograd/scipy/stats/gamma.py
@@ -1,6 +1,7 @@
-import autograd.numpy as np
 import scipy.stats
-from autograd.extend import primitive, defvjp
+
+import autograd.numpy as np
+from autograd.extend import defvjp, primitive
 from autograd.numpy.numpy_vjps import unbroadcast_f
 from autograd.scipy.special import gamma, psi
diff --git a/autograd/scipy/stats/multivariate_normal.py b/autograd/scipy/stats/multivariate_normal.py
index 42118ab9b..170823eef 100644
--- a/autograd/scipy/stats/multivariate_normal.py
+++ b/autograd/scipy/stats/multivariate_normal.py
@@ -1,9 +1,8 @@
 import scipy.stats

 import autograd.numpy as np
+from autograd.extend import defvjp, primitive
 from autograd.numpy.numpy_vjps import unbroadcast_f
-from autograd.extend import primitive, defvjp
-

 pdf = primitive(scipy.stats.multivariate_normal.pdf)
 logpdf = primitive(scipy.stats.multivariate_normal.logpdf)
diff --git a/autograd/scipy/stats/norm.py b/autograd/scipy/stats/norm.py
index e92845eb6..c610c1425 100644
--- a/autograd/scipy/stats/norm.py
+++ b/autograd/scipy/stats/norm.py
@@ -1,8 +1,9 @@
 """Gradients of the normal distribution."""

 import scipy.stats
+
 import autograd.numpy as anp
-from autograd.extend import primitive, defvjp
+from autograd.extend import defvjp, primitive
 from autograd.numpy.numpy_vjps import unbroadcast_f

 pdf = primitive(scipy.stats.norm.pdf)
diff --git a/autograd/scipy/stats/poisson.py b/autograd/scipy/stats/poisson.py
index 8556dc820..c9583c0cc 100644
--- a/autograd/scipy/stats/poisson.py
+++ b/autograd/scipy/stats/poisson.py
@@ -1,6 +1,7 @@
-import autograd.numpy as np
 import scipy.stats
-from autograd.extend import primitive, defvjp
+
+import autograd.numpy as np
+from autograd.extend import defvjp, primitive
 from autograd.numpy.numpy_vjps import unbroadcast_f

 cdf = primitive(scipy.stats.poisson.cdf)
diff --git a/autograd/scipy/stats/t.py b/autograd/scipy/stats/t.py
index 187ea188c..f9162fce5 100644
--- a/autograd/scipy/stats/t.py
+++ b/autograd/scipy/stats/t.py
@@ -1,8 +1,9 @@
@@ """Gradients of the univariate t distribution.""" import scipy.stats + import autograd.numpy as np -from autograd.extend import primitive, defvjp +from autograd.extend import defvjp, primitive from autograd.numpy.numpy_vjps import unbroadcast_f from autograd.scipy.special import psi diff --git a/autograd/test_util.py b/autograd/test_util.py index f135a0a86..14437d646 100644 --- a/autograd/test_util.py +++ b/autograd/test_util.py @@ -1,8 +1,7 @@ -from functools import partial from itertools import product -from .core import make_vjp, make_jvp, vspace -from .util import subvals -from .wrap_util import unary_to_nary, get_name + +from .core import make_jvp, make_vjp, vspace +from .wrap_util import get_name, unary_to_nary TOL = 1e-6 RTOL = 1e-6 @@ -57,9 +56,7 @@ def check_equivalent(x, y): x_vs, y_vs = vspace(x), vspace(y) assert x_vs == y_vs, f"VSpace mismatch:\nx: {x_vs}\ny: {y_vs}" v = x_vs.randn() - assert scalar_close( - x_vs.inner_prod(x, v), x_vs.inner_prod(y, v) - ), f"Value mismatch:\nx: {x}\ny: {y}" + assert scalar_close(x_vs.inner_prod(x, v), x_vs.inner_prod(y, v)), f"Value mismatch:\nx: {x}\ny: {y}" @unary_to_nary diff --git a/autograd/tracer.py b/autograd/tracer.py index 95f9834c6..30fa72a6a 100644 --- a/autograd/tracer.py +++ b/autograd/tracer.py @@ -1,6 +1,7 @@ import warnings -from contextlib import contextmanager from collections import defaultdict +from contextlib import contextmanager + from .util import subvals, toposort from .wrap_util import wraps diff --git a/autograd/util.py b/autograd/util.py index 7a00a8f0b..6f73edbe4 100644 --- a/autograd/util.py +++ b/autograd/util.py @@ -1,5 +1,4 @@ import operator -import sys def subvals(x, ivs): @@ -15,7 +14,6 @@ def subval(x, i, v): return tuple(x_) - def func(f): return f diff --git a/benchmarks/bench_core.py b/benchmarks/bench_core.py index 2d9e57d87..9dd5ddd10 100644 --- a/benchmarks/bench_core.py +++ b/benchmarks/bench_core.py @@ -1,14 +1,15 @@ import numpy as onp + import autograd.numpy as np from autograd import grad try: - from autograd.core import vspace, VJPNode, backward_pass - from autograd.tracer import trace, new_box + from autograd.core import VJPNode, backward_pass, vspace + from autograd.tracer import new_box, trace MASTER_BRANCH = False except ImportError: - from autograd.core import vspace, forward_pass, backward_pass, new_progenitor + from autograd.core import backward_pass, forward_pass, new_progenitor, vspace MASTER_BRANCH = True diff --git a/benchmarks/bench_numpy_vjps.py b/benchmarks/bench_numpy_vjps.py index ca01b58f7..3bb58bf13 100644 --- a/benchmarks/bench_numpy_vjps.py +++ b/benchmarks/bench_numpy_vjps.py @@ -1,7 +1,6 @@ -from autograd import make_vjp - import autograd.numpy as np import autograd.numpy.random as npr +from autograd import make_vjp dot_0 = lambda a, b, g: make_vjp(np.dot, argnum=0)(a, b)[0](g) dot_1 = lambda a, b, g: make_vjp(np.dot, argnum=1)(a, b)[0](g) diff --git a/benchmarks/bench_rnn.py b/benchmarks/bench_rnn.py index 7fd0710f5..5635964d0 100644 --- a/benchmarks/bench_rnn.py +++ b/benchmarks/bench_rnn.py @@ -1,8 +1,8 @@ # Write the benchmarking functions here. # See "Writing benchmarks" in the asv docs for more information. 
 # http://asv.readthedocs.io/en/latest/writing_benchmarks.html
-from autograd import grad
 import autograd.numpy as np
+from autograd import grad


 class RNNSuite:
diff --git a/benchmarks/bench_util.py b/benchmarks/bench_util.py
index e21860627..13952e490 100644
--- a/benchmarks/bench_util.py
+++ b/benchmarks/bench_util.py
@@ -1,5 +1,5 @@
-import autograd.numpy.random as npr
 import autograd.numpy as np
+import autograd.numpy.random as npr
 from autograd import grad

 try:
diff --git a/examples/bayesian_neural_net.py b/examples/bayesian_neural_net.py
index 284aff625..5082242a8 100644
--- a/examples/bayesian_neural_net.py
+++ b/examples/bayesian_neural_net.py
@@ -1,9 +1,8 @@
 import matplotlib.pyplot as plt
+from black_box_svi import black_box_variational_inference

 import autograd.numpy as np
 import autograd.numpy.random as npr
-
-from black_box_svi import black_box_variational_inference
 from autograd.misc.optimizers import adam
diff --git a/examples/bayesian_optimization.py b/examples/bayesian_optimization.py
index e6869156d..5a48f881b 100644
--- a/examples/bayesian_optimization.py
+++ b/examples/bayesian_optimization.py
@@ -2,12 +2,12 @@
 to find the next query point."""

 import matplotlib.pyplot as plt
+from gaussian_process import make_gp_funs, rbf_covariance
+from scipy.optimize import minimize

 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd import value_and_grad
-from scipy.optimize import minimize
-from gaussian_process import make_gp_funs, rbf_covariance
 from autograd.scipy.stats import norm
diff --git a/examples/black_box_svi.py b/examples/black_box_svi.py
index deae4b398..64de2f773 100644
--- a/examples/black_box_svi.py
+++ b/examples/black_box_svi.py
@@ -4,7 +4,6 @@
 import autograd.numpy.random as npr
 import autograd.scipy.stats.multivariate_normal as mvn
 import autograd.scipy.stats.norm as norm
-
 from autograd import grad
 from autograd.misc.optimizers import adam
diff --git a/examples/convnet.py b/examples/convnet.py
index c7c41fe40..de2ef1b04 100644
--- a/examples/convnet.py
+++ b/examples/convnet.py
@@ -1,12 +1,12 @@
 """Convolutional neural net on MNIST, modeled on 'LeNet-5',
 http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf"""

+import data_mnist

 import autograd.numpy as np
 import autograd.numpy.random as npr
 import autograd.scipy.signal
 from autograd import grad

-import data_mnist
 convolve = autograd.scipy.signal.convolve
diff --git a/examples/data.py b/examples/data.py
index 71ec2a755..fc60eda86 100644
--- a/examples/data.py
+++ b/examples/data.py
@@ -1,9 +1,9 @@
-import matplotlib.pyplot as plt
+import data_mnist
 import matplotlib.image
+import matplotlib.pyplot as plt

 import autograd.numpy as np
 import autograd.numpy.random as npr
-import data_mnist


 def load_mnist():
diff --git a/examples/data_mnist.py b/examples/data_mnist.py
index 66bb61496..0fa6c2ddc 100644
--- a/examples/data_mnist.py
+++ b/examples/data_mnist.py
@@ -1,12 +1,11 @@
-import sys
-
-import os
+import array
 import gzip
+import os
 import struct
-import array
-import numpy as np
 from urllib.request import urlretrieve

+import numpy as np
+

 def download(url, filename):
     if not os.path.exists("data"):
diff --git a/examples/deep_gaussian_process.py b/examples/deep_gaussian_process.py
index b651cfa91..92e6d42b9 100644
--- a/examples/deep_gaussian_process.py
+++ b/examples/deep_gaussian_process.py
@@ -1,11 +1,10 @@
 import matplotlib.pyplot as plt
+from gaussian_process import make_gp_funs, rbf_covariance
+from scipy.optimize import minimize

 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd import value_and_grad
-from scipy.optimize import minimize
-
-from gaussian_process import make_gp_funs, rbf_covariance


 def build_step_function_dataset(D=1, n_data=40, noise_std=0.1):
diff --git a/examples/define_gradient.py b/examples/define_gradient.py
index 44700e065..80e908dd4 100644
--- a/examples/define_gradient.py
+++ b/examples/define_gradient.py
@@ -4,9 +4,8 @@

 import autograd.numpy as np
 import autograd.numpy.random as npr
-
 from autograd import grad
-from autograd.extend import primitive, defvjp
+from autograd.extend import defvjp, primitive
 from autograd.test_util import check_grads
diff --git a/examples/dot_graph.py b/examples/dot_graph.py
index 66c79e565..026b5ce1e 100644
--- a/examples/dot_graph.py
+++ b/examples/dot_graph.py
@@ -5,8 +5,7 @@
 """

 import autograd.numpy as np
-from autograd.tracer import trace, Node
-from autograd import grad
+from autograd.tracer import Node, trace


 class GraphNode(Node):
diff --git a/examples/fluidsim/fluidsim.py b/examples/fluidsim/fluidsim.py
index 0b41fd338..225e3c495 100644
--- a/examples/fluidsim/fluidsim.py
+++ b/examples/fluidsim/fluidsim.py
@@ -1,12 +1,12 @@
-import autograd.numpy as np
-from autograd import value_and_grad
-
-from scipy.optimize import minimize
-from scipy.misc import imread
+import os

 import matplotlib
 import matplotlib.pyplot as plt
-import os
+from scipy.misc import imread
+from scipy.optimize import minimize
+
+import autograd.numpy as np
+from autograd import value_and_grad

 # Fluid simulation code based on
 # "Real-Time Fluid Dynamics for Games" by Jos Stam
diff --git a/examples/fluidsim/wing.py b/examples/fluidsim/wing.py
index cc74a5cee..22363df72 100644
--- a/examples/fluidsim/wing.py
+++ b/examples/fluidsim/wing.py
@@ -1,10 +1,10 @@
-import autograd.numpy as np
-from autograd import value_and_grad
+import os

+import matplotlib.pyplot as plt
 from scipy.optimize import minimize

-import matplotlib.pyplot as plt
-import os
+import autograd.numpy as np
+from autograd import value_and_grad

 rows, cols = 40, 60
diff --git a/examples/gaussian_process.py b/examples/gaussian_process.py
index 07f9b4a41..9ff292a27 100644
--- a/examples/gaussian_process.py
+++ b/examples/gaussian_process.py
@@ -1,11 +1,11 @@
 import matplotlib.pyplot as plt
+from scipy.optimize import minimize

 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.numpy.linalg import solve
 import autograd.scipy.stats.multivariate_normal as mvn
 from autograd import value_and_grad
-from scipy.optimize import minimize
+from autograd.numpy.linalg import solve


 def make_gp_funs(cov_func, num_cov_params):
diff --git a/examples/generative_adversarial_net.py b/examples/generative_adversarial_net.py
index 478fbf1ae..a30c0a3ed 100644
--- a/examples/generative_adversarial_net.py
+++ b/examples/generative_adversarial_net.py
@@ -3,14 +3,13 @@
 # but, it always collapses to generating a single image.
 # Let me know if you can get it to work!  - David Duvenaud
+from data import load_mnist, save_images
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd import grad
 from autograd.misc import flatten

-from data import load_mnist, save_images
-
-
 ### Define geneerator, discriminator, and objective ###
diff --git a/examples/gmm.py b/examples/gmm.py
index cc3670b41..175ed1252 100644
--- a/examples/gmm.py
+++ b/examples/gmm.py
@@ -3,15 +3,15 @@
 works on arbitrarily-high dimension."""

 import matplotlib.pyplot as plt
+from data import make_pinwheel
+from scipy.optimize import minimize

 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd import grad, hessian_vector_product
-from scipy.optimize import minimize
-from autograd.scipy.special import logsumexp
 import autograd.scipy.stats.multivariate_normal as mvn
+from autograd import grad, hessian_vector_product
 from autograd.misc.flatten import flatten_func
-from data import make_pinwheel
+from autograd.scipy.special import logsumexp


 def init_gmm_params(num_components, D, scale, rs=npr.RandomState(0)):
diff --git a/examples/gplvm.py b/examples/gplvm.py
index 0bea00c1e..b150e4b2d 100644
--- a/examples/gplvm.py
+++ b/examples/gplvm.py
@@ -12,16 +12,15 @@

 import matplotlib.pyplot as plt
+from data import make_pinwheel
+from gaussian_process import make_gp_funs, rbf_covariance
+from scipy.optimize import minimize

 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd import value_and_grad
-from scipy.optimize import minimize
 from autograd.scipy.stats import norm

-from gaussian_process import make_gp_funs, rbf_covariance
-from data import make_pinwheel
-
 if __name__ == "__main__":
     data_dimension = 2  # Normally the data dimension would be much higher.
     latent_dimension = 2
diff --git a/examples/hmm_em.py b/examples/hmm_em.py
index 32a349c80..462a4a92c 100644
--- a/examples/hmm_em.py
+++ b/examples/hmm_em.py
@@ -1,10 +1,11 @@
+import string
+from functools import partial
+from os.path import dirname, join
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.scipy.special import logsumexp
 from autograd import value_and_grad as vgrad
-from functools import partial
-from os.path import join, dirname
-import string
+from autograd.scipy.special import logsumexp


 def EM(init_params, data, callback=None):
diff --git a/examples/ica.py b/examples/ica.py
index 10112499d..5d55e587b 100644
--- a/examples/ica.py
+++ b/examples/ica.py
@@ -1,13 +1,12 @@
-import matplotlib.pyplot as plt
 import matplotlib.cm as cm
+import matplotlib.pyplot as plt
+from scipy.optimize import minimize

 import autograd.numpy as np
 import autograd.numpy.random as npr
 import autograd.scipy.stats.t as t
 from autograd import value_and_grad
-from scipy.optimize import minimize
-

 def make_ica_funs(observed_dimension, latent_dimension):
     """These functions implement independent component analysis.
diff --git a/examples/lstm.py b/examples/lstm.py
index 6454d3058..274faad5f 100644
--- a/examples/lstm.py
+++ b/examples/lstm.py
@@ -3,13 +3,14 @@
 has a fixed length."""

 from os.path import dirname, join
+
+from rnn import build_dataset, concat_and_multiply, one_hot_to_string, sigmoid, string_to_one_hot
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd import grad
-from autograd.scipy.special import logsumexp
-
 from autograd.misc.optimizers import adam
-from rnn import string_to_one_hot, one_hot_to_string, build_dataset, sigmoid, concat_and_multiply
+from autograd.scipy.special import logsumexp


 def init_lstm_params(input_size, state_size, output_size, param_scale=0.01, rs=npr.RandomState(0)):
diff --git a/examples/mixture_variational_inference.py b/examples/mixture_variational_inference.py
index 0edd1a34d..d5234f688 100644
--- a/examples/mixture_variational_inference.py
+++ b/examples/mixture_variational_inference.py
@@ -9,10 +9,9 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
 import autograd.scipy.stats.norm as norm
-from autograd.scipy.special import logsumexp
-
 from autograd import grad
 from autograd.misc.optimizers import adam
+from autograd.scipy.special import logsumexp


 def diag_gaussian_log_density(x, mu, log_std):
diff --git a/examples/natural_gradient_black_box_svi.py b/examples/natural_gradient_black_box_svi.py
index b0f9b41cc..274c8814c 100644
--- a/examples/natural_gradient_black_box_svi.py
+++ b/examples/natural_gradient_black_box_svi.py
@@ -1,13 +1,12 @@
 import matplotlib.pyplot as plt

+# same BBSVI function!
+from black_box_svi import black_box_variational_inference
+
 import autograd.numpy as np
 import autograd.scipy.stats.norm as norm
-
 from autograd.misc.optimizers import adam, sgd

-# same BBSVI function!
-from black_box_svi import black_box_variational_inference
-
 if __name__ == "__main__":
     # Specify an inference problem by its unnormalized log-density.
     # it's difficult to see the benefit in low dimensions
diff --git a/examples/negative_binomial_maxlike.py b/examples/negative_binomial_maxlike.py
index cfd8ec38a..d646c7a45 100644
--- a/examples/negative_binomial_maxlike.py
+++ b/examples/negative_binomial_maxlike.py
@@ -1,10 +1,9 @@
+import scipy.optimize
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.scipy.special import gammaln
 from autograd import grad
-
-import scipy.optimize
-
+from autograd.scipy.special import gammaln

 # The code in this example implements a method for finding a stationary point of
 # the negative binomial likelihood via Newton's method, described here:
diff --git a/examples/neural_net.py b/examples/neural_net.py
index 007787fcc..9fe4d2010 100644
--- a/examples/neural_net.py
+++ b/examples/neural_net.py
@@ -1,12 +1,13 @@
 """A multi-layer perceptron for classification of MNIST handwritten digits."""

+from data import load_mnist
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.scipy.special import logsumexp
 from autograd import grad
 from autograd.misc.flatten import flatten
 from autograd.misc.optimizers import adam
-from data import load_mnist
+from autograd.scipy.special import logsumexp


 def init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):
diff --git a/examples/neural_net_regression.py b/examples/neural_net_regression.py
index 2b1d860a8..076abfb51 100644
--- a/examples/neural_net_regression.py
+++ b/examples/neural_net_regression.py
@@ -5,7 +5,6 @@
 import autograd.scipy.stats.norm as norm
 from autograd import grad
 from autograd.misc import flatten
-
 from autograd.misc.optimizers import adam
diff --git a/examples/ode_net.py b/examples/ode_net.py
index afada8506..59ccb08a5 100644
--- a/examples/ode_net.py
+++ b/examples/ode_net.py
@@ -6,12 +6,11 @@
 import numpy as npo

 import autograd.numpy as np
+import autograd.numpy.random as npr
 from autograd import grad
-from autograd.scipy.integrate import odeint
 from autograd.builtins import tuple
 from autograd.misc.optimizers import adam
-import autograd.numpy.random as npr
-
+from autograd.scipy.integrate import odeint

 N = 30  # Dataset size
 D = 2  # Data dimension
diff --git a/examples/print_trace.py b/examples/print_trace.py
index 4b79316fe..eb1c42a1d 100644
--- a/examples/print_trace.py
+++ b/examples/print_trace.py
@@ -3,7 +3,7 @@
 evaluated"""

 import autograd.numpy as np  # autograd has already wrapped numpy for us
-from autograd.tracer import trace, Node
+from autograd.tracer import Node, trace


 class PrintNode(Node):
diff --git a/examples/rkhs.py b/examples/rkhs.py
index d9a8beff0..1e8d236db 100644
--- a/examples/rkhs.py
+++ b/examples/rkhs.py
@@ -5,9 +5,9 @@

 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.extend import primitive, defvjp, defjvp, VSpace, Box
-from autograd.util import func
 from autograd import grad
+from autograd.extend import Box, VSpace, defvjp, primitive
+from autograd.util import func


 class RKHSFun:
diff --git a/examples/rnn.py b/examples/rnn.py
index 056dc0ee7..2f2b7dc30 100644
--- a/examples/rnn.py
+++ b/examples/rnn.py
@@ -2,13 +2,13 @@
 This version vectorizes over multiple examples, but each string
 has a fixed length."""

+from os.path import dirname, join
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd import grad
-from autograd.scipy.special import logsumexp
-from os.path import dirname, join
 from autograd.misc.optimizers import adam
-
+from autograd.scipy.special import logsumexp

 ### Helper functions #################
diff --git a/examples/rosenbrock.py b/examples/rosenbrock.py
index ba796006a..f7f06c5d8 100644
--- a/examples/rosenbrock.py
+++ b/examples/rosenbrock.py
@@ -1,6 +1,7 @@
+from scipy.optimize import minimize
+
 import autograd.numpy as np
 from autograd import value_and_grad
-from scipy.optimize import minimize


 def rosenbrock(x):
diff --git a/examples/sinusoid.py b/examples/sinusoid.py
index bdae1cf4f..50b4d85a9 100644
--- a/examples/sinusoid.py
+++ b/examples/sinusoid.py
@@ -1,5 +1,6 @@
-import autograd.numpy as np
 import matplotlib.pyplot as plt
+
+import autograd.numpy as np
 from autograd import grad
diff --git a/examples/tanh.py b/examples/tanh.py
index 82f9be9ac..d70e7f7e0 100644
--- a/examples/tanh.py
+++ b/examples/tanh.py
@@ -1,5 +1,6 @@
-import autograd.numpy as np
 import matplotlib.pyplot as plt
+
+import autograd.numpy as np
 from autograd import elementwise_grad as egrad

 """
diff --git a/examples/variational_autoencoder.py b/examples/variational_autoencoder.py
index 17185a106..b96df7199 100644
--- a/examples/variational_autoencoder.py
+++ b/examples/variational_autoencoder.py
@@ -1,13 +1,13 @@
 # Implements auto-encoding variational Bayes.

+from data import load_mnist, save_images
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
 import autograd.scipy.stats.norm as norm
-from autograd.scipy.special import expit as sigmoid
-
 from autograd import grad
 from autograd.misc.optimizers import adam
-from data import load_mnist, save_images
+from autograd.scipy.special import expit as sigmoid


 def diag_gaussian_log_density(x, mu, log_std):
diff --git a/tests/_test_complexity.py b/tests/_test_complexity.py
index 91eb121be..9813b8873 100644
--- a/tests/_test_complexity.py
+++ b/tests/_test_complexity.py
@@ -1,7 +1,8 @@
 import time
 import warnings
-from autograd import grad, deriv
+
 import autograd.numpy as np
+from autograd import deriv, grad
 from autograd.builtins import list as make_list
diff --git a/tests/numpy_utils.py b/tests/numpy_utils.py
index f698ff7a1..f5cebb187 100644
--- a/tests/numpy_utils.py
+++ b/tests/numpy_utils.py
@@ -1,8 +1,5 @@
-import itertools as it
 import autograd.numpy.random as npr
-from autograd import grad
 from autograd.test_util import combo_check
-import warnings


 def stat_check(fun, test_complex=True, **kwargs):
diff --git a/tests/profiling.py b/tests/profiling.py
index fe0fa62fa..ec17b0983 100644
--- a/tests/profiling.py
+++ b/tests/profiling.py
@@ -1,8 +1,9 @@
-from autograd import grad
+from contextlib import contextmanager
+from time import time
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from time import time
-from contextlib import contextmanager
+from autograd import grad


 @contextmanager
@@ -39,7 +40,6 @@ def convolution():
 def dot_equivalent():
     # MNIST-scale convolution operation
-    import autograd.scipy.signal

     dat = npr.randn(256, 3, 24, 5, 24, 5)
     kernel = npr.randn(3, 5, 5)
diff --git a/tests/test_binary_ops.py b/tests/test_binary_ops.py
index a1024b788..1d6b70dc3 100644
--- a/tests/test_binary_ops.py
+++ b/tests/test_binary_ops.py
@@ -1,9 +1,10 @@
+import itertools as it
 import warnings
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-import itertools as it
-from autograd.test_util import check_grads
 from autograd import grad, value_and_grad
+from autograd.test_util import check_grads

 rs = npr.RandomState(0)

@@ -48,7 +49,7 @@ def test_mod():
     fun = lambda x, y: x % y
     make_gap_from_zero = lambda x: np.sqrt(x**2 + 0.5)
     for arg1, arg2 in arg_pairs():
-        if not arg1 is arg2:  # Gradient undefined at x == y
+        if arg1 is not arg2:  # Gradient undefined at x == y
             arg1 = make_gap_from_zero(arg1)
             arg2 = make_gap_from_zero(arg2)
             check_grads(fun)(arg1, arg2)
diff --git a/tests/test_complex.py b/tests/test_complex.py
index e17611ca1..9a1162f72 100644
--- a/tests/test_complex.py
+++ b/tests/test_complex.py
@@ -1,7 +1,7 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
 from autograd import grad
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_core.py b/tests/test_core.py
index 72400e3ff..e8b6fcd80 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -2,6 +2,7 @@
 on basic operations even without numpy."""

 import warnings
+
 from autograd.core import make_vjp
 from autograd.wrap_util import unary_to_nary
diff --git a/tests/test_dict.py b/tests/test_dict.py
index 069c2504c..11fb6bed7 100644
--- a/tests/test_dict.py
+++ b/tests/test_dict.py
@@ -1,9 +1,11 @@
+import operator as op
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
-from autograd import dict as ag_dict, isinstance as ag_isinstance
+from autograd import dict as ag_dict
 from autograd import grad
-import operator as op
+from autograd import isinstance as ag_isinstance
+from autograd.test_util import check_grads

 npr.seed(0)
diff --git a/tests/test_direct.py b/tests/test_direct.py
index 98d533d53..15612b068 100644
--- a/tests/test_direct.py
+++ b/tests/test_direct.py
@@ -4,10 +4,11 @@
 """

 import numpy as onp
-import autograd.numpy as np
-from autograd import grad, deriv, holomorphic_grad
 import pytest

+import autograd.numpy as np
+from autograd import deriv, grad, holomorphic_grad
+

 def test_grad():
     def fun(x):
diff --git a/tests/test_fft.py b/tests/test_fft.py
index 52a9843e0..1de7772c7 100644
--- a/tests/test_fft.py
+++ b/tests/test_fft.py
@@ -1,9 +1,10 @@
 from functools import partial
+
+import pytest
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
 from autograd.test_util import check_grads
-from autograd import grad
-import pytest

 npr.seed(1)
diff --git a/tests/test_graphs.py b/tests/test_graphs.py
index 8cf80f754..f27f16dc6 100644
--- a/tests/test_graphs.py
+++ b/tests/test_graphs.py
@@ -1,9 +1,11 @@
+import warnings
+
+import pytest
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
 from autograd import grad
-import warnings
-import pytest
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_jacobian.py b/tests/test_jacobian.py
index f059d37bc..2ea9b9e6f 100644
--- a/tests/test_jacobian.py
+++ b/tests/test_jacobian.py
@@ -1,7 +1,7 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
 from autograd import grad, jacobian
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_linalg.py b/tests/test_linalg.py
index ada549783..3bf540a28 100644
--- a/tests/test_linalg.py
+++ b/tests/test_linalg.py
@@ -1,12 +1,12 @@
-import itertools
+from functools import partial
+
 import numpy as onp
+import pytest
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
 from autograd import tuple
-from autograd import grad
-from functools import partial
-import pytest
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_list.py b/tests/test_list.py
index a6e382310..2dcaf2485 100644
--- a/tests/test_list.py
+++ b/tests/test_list.py
@@ -1,8 +1,9 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads, check_vjp, check_jvp
 from autograd import grad
-from autograd import list as ag_list, isinstance as ag_isinstance
+from autograd import isinstance as ag_isinstance
+from autograd import list as ag_list
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_logic.py b/tests/test_logic.py
index 61f1dd189..c43653644 100644
--- a/tests/test_logic.py
+++ b/tests/test_logic.py
@@ -1,11 +1,13 @@
+import warnings
 from contextlib import contextmanager
+
 import pytest
-import warnings
+
 import autograd.numpy as np
-from autograd import grad, deriv
+from autograd import deriv, grad
+from autograd.core import primitive_vjps
 from autograd.extend import primitive
 from autograd.test_util import check_grads
-from autograd.core import primitive_vjps


 def test_assert():
diff --git a/tests/test_misc.py b/tests/test_misc.py
index fbac999b1..5cffd3e52 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -1,9 +1,9 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
+from autograd import grad, make_vjp
+from autograd.misc import const_graph, flatten
 from autograd.test_util import scalar_close
-from autograd import make_vjp, grad
 from autograd.tracer import primitive
-from autograd.misc import const_graph, flatten


 def test_const_graph():
diff --git a/tests/test_numpy.py b/tests/test_numpy.py
index 6ec15f011..e4cd0de4d 100644
--- a/tests/test_numpy.py
+++ b/tests/test_numpy.py
@@ -1,10 +1,11 @@
 import warnings

+from numpy_utils import combo_check
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
 from autograd import grad
-from numpy_utils import combo_check
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_scalar_ops.py b/tests/test_scalar_ops.py
index 838796aa0..f0b0c9884 100644
--- a/tests/test_scalar_ops.py
+++ b/tests/test_scalar_ops.py
@@ -1,7 +1,7 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
 from autograd import grad
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_scipy.py b/tests/test_scipy.py
index ff4841140..3662883a6 100644
--- a/tests/test_scipy.py
+++ b/tests/test_scipy.py
@@ -1,4 +1,5 @@
 from functools import partial
+
 import numpy as npo

 try:
@@ -8,19 +9,19 @@
     warn("Skipping scipy tests.")
 else:
+    from numpy_utils import unary_ufunc_check
+    from scipy.signal import convolve as sp_convolve
+
     import autograd.numpy as np
     import autograd.numpy.random as npr
+    import autograd.scipy.integrate as integrate
+    import autograd.scipy.linalg as spla
     import autograd.scipy.signal
+    import autograd.scipy.special as special
     import autograd.scipy.stats as stats
     import autograd.scipy.stats.multivariate_normal as mvn
-    import autograd.scipy.special as special
-    import autograd.scipy.linalg as spla
-    import autograd.scipy.integrate as integrate
     from autograd import grad
-    from scipy.signal import convolve as sp_convolve
-
-    from autograd.test_util import combo_check, check_grads
-    from numpy_utils import unary_ufunc_check
+    from autograd.test_util import check_grads, combo_check

     npr.seed(1)
     R = npr.randn
diff --git a/tests/test_systematic.py b/tests/test_systematic.py
index 71230bdf4..46425b8e2 100644
--- a/tests/test_systematic.py
+++ b/tests/test_systematic.py
@@ -1,8 +1,10 @@
+import operator as op
+
 import numpy as onp
-import autograd.numpy.random as npr
+from numpy_utils import binary_ufunc_check, binary_ufunc_check_no_same_args, stat_check, unary_ufunc_check
+
 import autograd.numpy as np
-import operator as op
-from numpy_utils import stat_check, unary_ufunc_check, binary_ufunc_check, binary_ufunc_check_no_same_args
+import autograd.numpy.random as npr
 from autograd.test_util import combo_check

 npr.seed(0)
diff --git a/tests/test_tests.py b/tests/test_tests.py
index 7dba3f7e7..cb9c21859 100644
--- a/tests/test_tests.py
+++ b/tests/test_tests.py
@@ -1,7 +1,8 @@
-from autograd.tracer import primitive, getval
+from pytest import raises
+
 from autograd.extend import defvjp
 from autograd.test_util import check_grads
-from pytest import raises
+from autograd.tracer import primitive


 def test_check_vjp_1st_order_fail():
diff --git a/tests/test_truediv.py b/tests/test_truediv.py
index 1ebc14313..e13e03e8a 100644
--- a/tests/test_truediv.py
+++ b/tests/test_truediv.py
@@ -1,9 +1,9 @@
 # This file is to check that future division works.
+from test_binary_ops import arg_pairs

 import autograd.numpy as np
 from autograd.test_util import check_grads
-from autograd import grad
-from test_binary_ops import arg_pairs


 def test_div():
diff --git a/tests/test_tuple.py b/tests/test_tuple.py
index 9193f280e..2784421f0 100644
--- a/tests/test_tuple.py
+++ b/tests/test_tuple.py
@@ -1,8 +1,9 @@
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads
-from autograd import tuple as ag_tuple, isinstance as ag_isinstance
 from autograd import grad
+from autograd import isinstance as ag_isinstance
+from autograd import tuple as ag_tuple
+from autograd.test_util import check_grads

 npr.seed(1)
diff --git a/tests/test_vspaces.py b/tests/test_vspaces.py
index f3956c1f5..1013c57ea 100644
--- a/tests/test_vspaces.py
+++ b/tests/test_vspaces.py
@@ -1,9 +1,10 @@
+import itertools as it
 from functools import reduce
+
+import numpy as np
+
 from autograd.core import vspace
-from autograd.numpy.numpy_vspaces import ArrayVSpace
 from autograd.test_util import check_grads, scalar_close
-import numpy as np
-import itertools as it


 def check_vspace(value):
diff --git a/tests/test_wrappers.py b/tests/test_wrappers.py
index 3fb326e47..985d0ab22 100644
--- a/tests/test_wrappers.py
+++ b/tests/test_wrappers.py
@@ -1,23 +1,24 @@
 import warnings
 from functools import partial
+
 import autograd.numpy as np
 import autograd.numpy.random as npr
-from autograd.test_util import check_grads, check_equivalent  # , nd
-from autograd.tracer import primitive, isbox
 from autograd import (
-    grad,
+    checkpoint,
     elementwise_grad,
-    jacobian,
-    value_and_grad,
-    hessian_tensor_product,
+    grad,
+    grad_and_aux,
     hessian,
+    hessian_tensor_product,
+    jacobian,
+    make_ggnvp,
     make_hvp,
-    tensor_jacobian_product,
-    checkpoint,
     make_jvp,
-    make_ggnvp,
-    grad_and_aux,
+    tensor_jacobian_product,
+    value_and_grad,
 )
+from autograd.test_util import check_equivalent, check_grads  # , nd
+from autograd.tracer import isbox

 npr.seed(1)

@@ -158,7 +159,7 @@ def test_hessian_matrix_product():
     check_equivalent(np.tensordot(H, V), hessian_tensor_product(fun)(a, V))


-def test_hessian_tensor_product():
+def test_hessian_tensor_product_3d():
     fun = lambda a: np.sum(np.sin(a))
     a = npr.randn(5, 4, 3)
     V = npr.randn(5, 4, 3)
@@ -408,8 +409,6 @@ def foo(x):
     assert grad.__name__ == "grad"
     # Python 3.13: Compiler now strip indents from docstrings.
     # https://docs.python.org/3.13/whatsnew/3.13.html#other-language-changes
-    assert grad.__doc__.startswith(
-        tuple(f"\n{indent}Returns a function which" for indent in (" ", ""))
-    )
+    assert grad.__doc__.startswith(tuple(f"\n{indent}Returns a function which" for indent in (" ", "")))
     assert grad(foo, 1).__name__ == "grad_of_foo_wrt_argnum_1"
     assert grad(foo, 1).__doc__.startswith(" grad of function foo with")
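
Note: since this patch is intended to be purely mechanical (import sorting and ruff formatting, no behavioral changes), the existing test suite should pass unchanged. Beyond that, a minimal smoke check is to differentiate a small function through the re-exported public API, confirming the reordered autograd/__init__.py and autograd/numpy/__init__.py still expose `grad` and the wrapped numpy namespace. A sketch, assuming this branch is installed as `autograd` (the `tanh` function below is an arbitrary example, not part of the patch):

    # Smoke test for the import-sorted package.
    import autograd.numpy as np  # wrapped numpy, re-exported by autograd.numpy
    from autograd import grad    # re-exported by autograd/__init__.py

    def tanh(x):
        return (1.0 - np.exp(-2.0 * x)) / (1.0 + np.exp(-2.0 * x))

    # Compare the reverse-mode gradient against a central finite difference.
    x, eps = 0.5, 1e-6
    fd = (tanh(x + eps) - tanh(x - eps)) / (2 * eps)
    assert abs(grad(tanh)(x) - fd) < 1e-6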