diff --git a/docs/Deconvolution by FIR.rst b/docs/Deconvolution by FIR.rst index 85c89764a..bd478e5ff 100644 --- a/docs/Deconvolution by FIR.rst +++ b/docs/Deconvolution by FIR.rst @@ -20,7 +20,7 @@ Design of a digital deconvolution filter (FIR type) .. code:: python - from PyDynamic.model_estimation.fit_filter import invLSFIR_unc + from PyDynamic.model_estimation.fit_filter import LSFIR from PyDynamic.misc.SecondOrderSystem import * from PyDynamic.misc.testsignals import shocklikeGaussian from PyDynamic.misc.filterstuff import kaiser_lowpass, db @@ -193,7 +193,7 @@ Filter coefficients and associated uncertainties are thus obtained as # Calculation of FIR deconvolution filter and its assoc. unc. N = 12; tau = N//2 - bF, UbF = invLSFIR_unc(H,UH,N,tau,f,Fs) + bF, UbF = LSFIR(H,N,tau,f,Fs,UH=UH) .. parsed-literal:: @@ -297,8 +297,8 @@ Fit an FIR filter to the reciprocal of the measured frequency response .. code:: python - from PyDynamic.model_estimation.fit_filter import invLSFIR_unc - bF, UbF = invLSFIR_unc(H,UH,N,tau,f,Fs, verbose=False) + from PyDynamic.model_estimation.fit_filter import LSFIR + bF, UbF = LSFIR(H,N,tau,f,Fs,verbose=False,UH=UH) with diff --git a/docs/PyDynamic.uncertainty.rst b/docs/PyDynamic.uncertainty.rst index 41f62df8a..e5c307bb3 100644 --- a/docs/PyDynamic.uncertainty.rst +++ b/docs/PyDynamic.uncertainty.rst @@ -54,5 +54,5 @@ Monte Carlo methods for digital filtering Uncertainty evaluation for interpolation ---------------------------------------- -.. automodule:: PyDynamic.uncertainty.interpolation +.. automodule:: PyDynamic.uncertainty.interpolate :members: diff --git a/docs/conf.py b/docs/conf.py index 2a86f13ef..95e8d6a5d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -138,9 +138,6 @@ # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = True - # -- Options for HTML output ---------------------------------------------- diff --git a/requirements/dev-requirements-py36.txt b/requirements/dev-requirements-py36.txt index 6d73b1be7..41a72a24c 100644 --- a/requirements/dev-requirements-py36.txt +++ b/requirements/dev-requirements-py36.txt @@ -6,7 +6,7 @@ # alabaster==0.7.12 # via sphinx -astroid==2.7.3 +astroid==2.8.2 # via pylint async-generator==1.10 # via nbclient @@ -19,19 +19,19 @@ babel==2.9.1 # via sphinx backports.entry-points-selectable==1.1.0 # via virtualenv -black==21.8b0 +black==21.9b0 # via -r requirements/dev-requirements.in bleach==4.1.0 # via # nbconvert # readme-renderer -certifi==2021.5.30 +certifi==2021.10.8 # via requests -cffi==1.14.6 +cffi==1.15.0 # via cryptography -charset-normalizer==2.0.4 +charset-normalizer==2.0.7 # via requests -click==8.0.1 +click==8.0.3 # via # black # click-log @@ -42,9 +42,9 @@ colorama==0.4.4 # via twine commonmark==0.9.1 # via recommonmark -coverage==5.5 +coverage[toml]==6.0.2 # via pytest-cov -cryptography==3.4.8 +cryptography==35.0.0 # via secretstorage dataclasses==0.8 # via black @@ -52,9 +52,9 @@ decorator==5.1.0 # via traitlets defusedxml==0.7.1 # via nbconvert -distlib==0.3.2 +distlib==0.3.3 # via virtualenv -docutils==0.16 +docutils==0.17.1 # via # nbsphinx # readme-renderer @@ -67,7 +67,7 @@ entrypoints==0.3 # via # jupyter-client # nbconvert -filelock==3.0.12 +filelock==3.3.1 # via # tox # virtualenv @@ -75,9 +75,9 @@ gitdb==4.0.7 # via gitpython gitpython==3.1.18 # via python-semantic-release -hypothesis==6.21.0 +hypothesis==6.23.2 # via -r requirements/dev-requirements.in -idna==3.2 +idna==3.3 # via requests imagesize==1.2.0 # via sphinx @@ -108,16 +108,16 @@ jeepney==0.7.1 # via # keyring # secretstorage -jinja2==3.0.1 +jinja2==3.0.2 # via # nbconvert # nbsphinx # sphinx jsonschema==3.2.0 # via nbformat -jupyter-client==7.0.2 +jupyter-client==7.0.6 # via nbclient -jupyter-core==4.7.1 +jupyter-core==4.8.1 # via # 
jupyter-client # nbconvert @@ -158,13 +158,13 @@ packaging==21.0 # setuptools-scm # sphinx # tox -pandocfilters==1.4.3 +pandocfilters==1.5.0 # via nbconvert pathspec==0.9.0 # via black pkginfo==1.7.1 # via twine -platformdirs==2.3.0 +platformdirs==2.4.0 # via # black # pylint @@ -185,7 +185,7 @@ pygments==2.10.0 # nbconvert # readme-renderer # sphinx -pylint==2.10.2 +pylint==2.11.1 # via -r requirements/dev-requirements.in pyparsing==2.4.7 # via @@ -197,7 +197,7 @@ pytest==6.2.5 # via # -r requirements/dev-requirements.in # pytest-cov -pytest-cov==2.12.1 +pytest-cov==3.0.0 # via -r requirements/dev-requirements.in python-dateutil==2.8.2 # via @@ -207,17 +207,17 @@ python-gitlab==2.10.1 # via python-semantic-release python-semantic-release==7.19.2 # via -r requirements/dev-requirements.in -pytz==2021.1 +pytz==2021.3 # via # -c requirements/requirements-py36.txt # babel -pyzmq==22.2.1 +pyzmq==22.3.0 # via jupyter-client -readme-renderer==29.0 +readme-renderer==30.0 # via twine recommonmark==0.7.1 # via -r requirements/dev-requirements.in -regex==2021.8.28 +regex==2021.10.8 # via black requests==2.26.0 # via @@ -232,7 +232,7 @@ requests-toolbelt==0.9.1 # twine rfc3986==1.5.0 # via twine -rope==0.19.0 +rope==0.20.1 # via -r requirements/dev-requirements.in secretstorage==3.3.1 # via keyring @@ -246,7 +246,6 @@ six==1.16.0 # bleach # jsonschema # python-dateutil - # readme-renderer # tox # traitlets # virtualenv @@ -262,7 +261,7 @@ sphinx==4.2.0 # nbsphinx # recommonmark # sphinx-rtd-theme -sphinx-rtd-theme==0.5.2 +sphinx-rtd-theme==1.0.0 # via -r requirements/dev-requirements.in sphinxcontrib-applehelp==1.0.2 # via sphinx @@ -282,19 +281,19 @@ toml==0.10.2 # via # pylint # pytest - # pytest-cov # tox tomli==1.2.1 # via # black + # coverage # setuptools-scm tomlkit==0.7.0 # via python-semantic-release tornado==6.1 # via jupyter-client -tox==3.24.3 +tox==3.24.4 # via -r requirements/dev-requirements.in -tqdm==4.62.2 +tqdm==4.62.3 # via twine traitlets==4.3.3 # via @@ 
-316,9 +315,10 @@ typing-extensions==3.10.0.2 # black # gitpython # importlib-metadata -urllib3==1.26.6 + # pylint +urllib3==1.26.7 # via requests -virtualenv==20.7.2 +virtualenv==20.8.1 # via tox webencodings==0.5.1 # via bleach @@ -326,7 +326,7 @@ wheel==0.37.0 # via python-semantic-release wrapt==1.12.1 # via astroid -zipp==3.5.0 +zipp==3.6.0 # via # importlib-metadata # importlib-resources diff --git a/requirements/dev-requirements-py37.txt b/requirements/dev-requirements-py37.txt index a211d8cb5..f40276e6d 100644 --- a/requirements/dev-requirements-py37.txt +++ b/requirements/dev-requirements-py37.txt @@ -6,7 +6,7 @@ # alabaster==0.7.12 # via sphinx -astroid==2.7.3 +astroid==2.8.2 # via pylint attrs==21.2.0 # via @@ -17,19 +17,19 @@ babel==2.9.1 # via sphinx backports.entry-points-selectable==1.1.0 # via virtualenv -black==21.8b0 +black==21.9b0 # via -r requirements/dev-requirements.in bleach==4.1.0 # via # nbconvert # readme-renderer -certifi==2021.5.30 +certifi==2021.10.8 # via requests -cffi==1.14.6 +cffi==1.15.0 # via cryptography -charset-normalizer==2.0.4 +charset-normalizer==2.0.7 # via requests -click==8.0.1 +click==8.0.3 # via # black # click-log @@ -40,15 +40,15 @@ colorama==0.4.4 # via twine commonmark==0.9.1 # via recommonmark -coverage==5.5 +coverage[toml]==6.0.2 # via pytest-cov -cryptography==3.4.8 +cryptography==35.0.0 # via secretstorage defusedxml==0.7.1 # via nbconvert -distlib==0.3.2 +distlib==0.3.3 # via virtualenv -docutils==0.16 +docutils==0.17.1 # via # nbsphinx # readme-renderer @@ -61,17 +61,17 @@ entrypoints==0.3 # via # jupyter-client # nbconvert -filelock==3.0.12 +filelock==3.3.1 # via # tox # virtualenv gitdb==4.0.7 # via gitpython -gitpython==3.1.18 +gitpython==3.1.24 # via python-semantic-release -hypothesis==6.21.0 +hypothesis==6.23.2 # via -r requirements/dev-requirements.in -idna==3.2 +idna==3.3 # via requests imagesize==1.2.0 # via sphinx @@ -98,16 +98,16 @@ jeepney==0.7.1 # via # keyring # secretstorage -jinja2==3.0.1 
+jinja2==3.0.2 # via # nbconvert # nbsphinx # sphinx -jsonschema==3.2.0 +jsonschema==4.1.0 # via nbformat -jupyter-client==7.0.2 +jupyter-client==7.0.6 # via nbclient -jupyter-core==4.7.1 +jupyter-core==4.8.1 # via # jupyter-client # nbconvert @@ -128,7 +128,7 @@ mypy-extensions==0.4.3 # via black nbclient==0.5.4 # via nbconvert -nbconvert==6.1.0 +nbconvert==6.2.0 # via nbsphinx nbformat==5.1.3 # via @@ -148,13 +148,13 @@ packaging==21.0 # setuptools-scm # sphinx # tox -pandocfilters==1.4.3 +pandocfilters==1.5.0 # via nbconvert pathspec==0.9.0 # via black pkginfo==1.7.1 # via twine -platformdirs==2.3.0 +platformdirs==2.4.0 # via # black # pylint @@ -175,7 +175,7 @@ pygments==2.10.0 # nbconvert # readme-renderer # sphinx -pylint==2.10.2 +pylint==2.11.1 # via -r requirements/dev-requirements.in pyparsing==2.4.7 # via @@ -187,7 +187,7 @@ pytest==6.2.5 # via # -r requirements/dev-requirements.in # pytest-cov -pytest-cov==2.12.1 +pytest-cov==3.0.0 # via -r requirements/dev-requirements.in python-dateutil==2.8.2 # via @@ -197,17 +197,17 @@ python-gitlab==2.10.1 # via python-semantic-release python-semantic-release==7.19.2 # via -r requirements/dev-requirements.in -pytz==2021.1 +pytz==2021.3 # via # -c requirements/requirements-py37.txt # babel -pyzmq==22.2.1 +pyzmq==22.3.0 # via jupyter-client -readme-renderer==29.0 +readme-renderer==30.0 # via twine recommonmark==0.7.1 # via -r requirements/dev-requirements.in -regex==2021.8.28 +regex==2021.10.8 # via black requests==2.26.0 # via @@ -222,7 +222,7 @@ requests-toolbelt==0.9.1 # twine rfc3986==1.5.0 # via twine -rope==0.19.0 +rope==0.20.1 # via -r requirements/dev-requirements.in secretstorage==3.3.1 # via keyring @@ -234,9 +234,7 @@ six==1.16.0 # via # -c requirements/requirements-py37.txt # bleach - # jsonschema # python-dateutil - # readme-renderer # tox # virtualenv smmap==4.0.0 @@ -251,7 +249,7 @@ sphinx==4.2.0 # nbsphinx # recommonmark # sphinx-rtd-theme -sphinx-rtd-theme==0.5.2 +sphinx-rtd-theme==1.0.0 # via -r 
requirements/dev-requirements.in sphinxcontrib-applehelp==1.0.2 # via sphinx @@ -271,19 +269,19 @@ toml==0.10.2 # via # pylint # pytest - # pytest-cov # tox tomli==1.2.1 # via # black + # coverage # setuptools-scm tomlkit==0.7.0 # via python-semantic-release tornado==6.1 # via jupyter-client -tox==3.24.3 +tox==3.24.4 # via -r requirements/dev-requirements.in -tqdm==4.62.2 +tqdm==4.62.3 # via twine traitlets==5.1.0 # via @@ -305,9 +303,10 @@ typing-extensions==3.10.0.2 # black # gitpython # importlib-metadata -urllib3==1.26.6 + # pylint +urllib3==1.26.7 # via requests -virtualenv==20.7.2 +virtualenv==20.8.1 # via tox webencodings==0.5.1 # via bleach @@ -315,7 +314,7 @@ wheel==0.37.0 # via python-semantic-release wrapt==1.12.1 # via astroid -zipp==3.5.0 +zipp==3.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/dev-requirements-py38.txt b/requirements/dev-requirements-py38.txt index 5e76d1274..5444bb4fc 100644 --- a/requirements/dev-requirements-py38.txt +++ b/requirements/dev-requirements-py38.txt @@ -6,7 +6,7 @@ # alabaster==0.7.12 # via sphinx -astroid==2.7.3 +astroid==2.8.2 # via pylint attrs==21.2.0 # via @@ -17,19 +17,19 @@ babel==2.9.1 # via sphinx backports.entry-points-selectable==1.1.0 # via virtualenv -black==21.8b0 +black==21.9b0 # via -r requirements/dev-requirements.in bleach==4.1.0 # via # nbconvert # readme-renderer -certifi==2021.5.30 +certifi==2021.10.8 # via requests -cffi==1.14.6 +cffi==1.15.0 # via cryptography -charset-normalizer==2.0.4 +charset-normalizer==2.0.7 # via requests -click==8.0.1 +click==8.0.3 # via # black # click-log @@ -40,15 +40,15 @@ colorama==0.4.4 # via twine commonmark==0.9.1 # via recommonmark -coverage==5.5 +coverage[toml]==6.0.2 # via pytest-cov -cryptography==3.4.8 +cryptography==35.0.0 # via secretstorage defusedxml==0.7.1 # via nbconvert -distlib==0.3.2 +distlib==0.3.3 # via virtualenv -docutils==0.16 +docutils==0.17.1 # via # nbsphinx # 
readme-renderer @@ -61,17 +61,17 @@ entrypoints==0.3 # via # jupyter-client # nbconvert -filelock==3.0.12 +filelock==3.3.1 # via # tox # virtualenv gitdb==4.0.7 # via gitpython -gitpython==3.1.18 +gitpython==3.1.24 # via python-semantic-release -hypothesis==6.21.0 +hypothesis==6.23.2 # via -r requirements/dev-requirements.in -idna==3.2 +idna==3.3 # via requests imagesize==1.2.0 # via sphinx @@ -91,16 +91,16 @@ jeepney==0.7.1 # via # keyring # secretstorage -jinja2==3.0.1 +jinja2==3.0.2 # via # nbconvert # nbsphinx # sphinx -jsonschema==3.2.0 +jsonschema==4.1.0 # via nbformat -jupyter-client==7.0.2 +jupyter-client==7.0.6 # via nbclient -jupyter-core==4.7.1 +jupyter-core==4.8.1 # via # jupyter-client # nbconvert @@ -121,7 +121,7 @@ mypy-extensions==0.4.3 # via black nbclient==0.5.4 # via nbconvert -nbconvert==6.1.0 +nbconvert==6.2.0 # via nbsphinx nbformat==5.1.3 # via @@ -141,13 +141,13 @@ packaging==21.0 # setuptools-scm # sphinx # tox -pandocfilters==1.4.3 +pandocfilters==1.5.0 # via nbconvert pathspec==0.9.0 # via black pkginfo==1.7.1 # via twine -platformdirs==2.3.0 +platformdirs==2.4.0 # via # black # pylint @@ -168,7 +168,7 @@ pygments==2.10.0 # nbconvert # readme-renderer # sphinx -pylint==2.10.2 +pylint==2.11.1 # via -r requirements/dev-requirements.in pyparsing==2.4.7 # via @@ -180,7 +180,7 @@ pytest==6.2.5 # via # -r requirements/dev-requirements.in # pytest-cov -pytest-cov==2.12.1 +pytest-cov==3.0.0 # via -r requirements/dev-requirements.in python-dateutil==2.8.2 # via @@ -190,17 +190,17 @@ python-gitlab==2.10.1 # via python-semantic-release python-semantic-release==7.19.2 # via -r requirements/dev-requirements.in -pytz==2021.1 +pytz==2021.3 # via # -c requirements/requirements-py38.txt # babel -pyzmq==22.2.1 +pyzmq==22.3.0 # via jupyter-client -readme-renderer==29.0 +readme-renderer==30.0 # via twine recommonmark==0.7.1 # via -r requirements/dev-requirements.in -regex==2021.8.28 +regex==2021.10.8 # via black requests==2.26.0 # via @@ -215,7 +215,7 @@ 
requests-toolbelt==0.9.1 # twine rfc3986==1.5.0 # via twine -rope==0.19.0 +rope==0.20.1 # via -r requirements/dev-requirements.in secretstorage==3.3.1 # via keyring @@ -227,9 +227,7 @@ six==1.16.0 # via # -c requirements/requirements-py38.txt # bleach - # jsonschema # python-dateutil - # readme-renderer # tox # virtualenv smmap==4.0.0 @@ -244,7 +242,7 @@ sphinx==4.2.0 # nbsphinx # recommonmark # sphinx-rtd-theme -sphinx-rtd-theme==0.5.2 +sphinx-rtd-theme==1.0.0 # via -r requirements/dev-requirements.in sphinxcontrib-applehelp==1.0.2 # via sphinx @@ -264,19 +262,19 @@ toml==0.10.2 # via # pylint # pytest - # pytest-cov # tox tomli==1.2.1 # via # black + # coverage # setuptools-scm tomlkit==0.7.0 # via python-semantic-release tornado==6.1 # via jupyter-client -tox==3.24.3 +tox==3.24.4 # via -r requirements/dev-requirements.in -tqdm==4.62.2 +tqdm==4.62.3 # via twine traitlets==5.1.0 # via @@ -289,10 +287,14 @@ traitlets==5.1.0 twine==3.4.2 # via python-semantic-release typing-extensions==3.10.0.2 - # via black -urllib3==1.26.6 + # via + # astroid + # black + # gitpython + # pylint +urllib3==1.26.7 # via requests -virtualenv==20.7.2 +virtualenv==20.8.1 # via tox webencodings==0.5.1 # via bleach @@ -300,7 +302,7 @@ wheel==0.37.0 # via python-semantic-release wrapt==1.12.1 # via astroid -zipp==3.5.0 +zipp==3.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/dev-requirements-py39.txt b/requirements/dev-requirements-py39.txt index 15fa70483..f3cfd9484 100644 --- a/requirements/dev-requirements-py39.txt +++ b/requirements/dev-requirements-py39.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with python 3.9 # To update, run: # -# pip-compile requirements/dev-requirements-py39.in +# pip-compile --output-file=requirements/dev-requirements-py39.txt requirements/dev-requirements-py39.in # alabaster==0.7.12 # via sphinx -astroid==2.7.3 +astroid==2.8.2 # via pylint attrs==21.2.0 # 
via @@ -17,19 +17,19 @@ babel==2.9.1 # via sphinx backports.entry-points-selectable==1.1.0 # via virtualenv -black==21.8b0 +black==21.9b0 # via -r requirements/dev-requirements.in bleach==4.1.0 # via # nbconvert # readme-renderer -certifi==2021.5.30 +certifi==2021.10.8 # via requests -cffi==1.14.6 +cffi==1.15.0 # via cryptography -charset-normalizer==2.0.4 +charset-normalizer==2.0.7 # via requests -click==8.0.1 +click==8.0.3 # via # black # click-log @@ -40,15 +40,15 @@ colorama==0.4.4 # via twine commonmark==0.9.1 # via recommonmark -coverage==5.5 +coverage[toml]==6.0.2 # via pytest-cov -cryptography==3.4.8 +cryptography==35.0.0 # via secretstorage defusedxml==0.7.1 # via nbconvert -distlib==0.3.2 +distlib==0.3.3 # via virtualenv -docutils==0.16 +docutils==0.17.1 # via # nbsphinx # readme-renderer @@ -61,17 +61,17 @@ entrypoints==0.3 # via # jupyter-client # nbconvert -filelock==3.0.12 +filelock==3.3.1 # via # tox # virtualenv gitdb==4.0.7 # via gitpython -gitpython==3.1.18 +gitpython==3.1.24 # via python-semantic-release -hypothesis==6.21.1 +hypothesis==6.23.2 # via -r requirements/dev-requirements.in -idna==3.2 +idna==3.3 # via requests imagesize==1.2.0 # via sphinx @@ -91,16 +91,16 @@ jeepney==0.7.1 # via # keyring # secretstorage -jinja2==3.0.1 +jinja2==3.0.2 # via # nbconvert # nbsphinx # sphinx -jsonschema==3.2.0 +jsonschema==4.1.0 # via nbformat -jupyter-client==7.0.2 +jupyter-client==7.0.6 # via nbclient -jupyter-core==4.7.1 +jupyter-core==4.8.1 # via # jupyter-client # nbconvert @@ -121,7 +121,7 @@ mypy-extensions==0.4.3 # via black nbclient==0.5.4 # via nbconvert -nbconvert==6.1.0 +nbconvert==6.2.0 # via nbsphinx nbformat==5.1.3 # via @@ -141,13 +141,13 @@ packaging==21.0 # setuptools-scm # sphinx # tox -pandocfilters==1.4.3 +pandocfilters==1.5.0 # via nbconvert pathspec==0.9.0 # via black pkginfo==1.7.1 # via twine -platformdirs==2.3.0 +platformdirs==2.4.0 # via # black # pylint @@ -168,7 +168,7 @@ pygments==2.10.0 # nbconvert # readme-renderer # sphinx 
-pylint==2.10.2 +pylint==2.11.1 # via -r requirements/dev-requirements.in pyparsing==2.4.7 # via @@ -180,7 +180,7 @@ pytest==6.2.5 # via # -r requirements/dev-requirements.in # pytest-cov -pytest-cov==2.12.1 +pytest-cov==3.0.0 # via -r requirements/dev-requirements.in python-dateutil==2.8.2 # via @@ -190,17 +190,17 @@ python-gitlab==2.10.1 # via python-semantic-release python-semantic-release==7.19.2 # via -r requirements/dev-requirements.in -pytz==2021.1 +pytz==2021.3 # via # -c requirements/requirements-py39.txt # babel -pyzmq==22.2.1 +pyzmq==22.3.0 # via jupyter-client -readme-renderer==29.0 +readme-renderer==30.0 # via twine recommonmark==0.7.1 # via -r requirements/dev-requirements.in -regex==2021.8.28 +regex==2021.10.8 # via black requests==2.26.0 # via @@ -215,7 +215,7 @@ requests-toolbelt==0.9.1 # twine rfc3986==1.5.0 # via twine -rope==0.19.0 +rope==0.20.1 # via -r requirements/dev-requirements.in secretstorage==3.3.1 # via keyring @@ -227,9 +227,7 @@ six==1.16.0 # via # -c requirements/requirements-py39.txt # bleach - # jsonschema # python-dateutil - # readme-renderer # tox # virtualenv smmap==4.0.0 @@ -244,7 +242,7 @@ sphinx==4.2.0 # nbsphinx # recommonmark # sphinx-rtd-theme -sphinx-rtd-theme==0.5.2 +sphinx-rtd-theme==1.0.0 # via -r requirements/dev-requirements.in sphinxcontrib-applehelp==1.0.2 # via sphinx @@ -264,19 +262,19 @@ toml==0.10.2 # via # pylint # pytest - # pytest-cov # tox tomli==1.2.1 # via # black + # coverage # setuptools-scm tomlkit==0.7.0 # via python-semantic-release tornado==6.1 # via jupyter-client -tox==3.24.3 +tox==3.24.4 # via -r requirements/dev-requirements.in -tqdm==4.62.2 +tqdm==4.62.3 # via twine traitlets==5.1.0 # via @@ -289,10 +287,14 @@ traitlets==5.1.0 twine==3.4.2 # via python-semantic-release typing-extensions==3.10.0.2 - # via black -urllib3==1.26.6 + # via + # astroid + # black + # gitpython + # pylint +urllib3==1.26.7 # via requests -virtualenv==20.7.2 +virtualenv==20.8.1 # via tox webencodings==0.5.1 # via bleach 
@@ -300,7 +302,7 @@ wheel==0.37.0 # via python-semantic-release wrapt==1.12.1 # via astroid -zipp==3.5.0 +zipp==3.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/requirements-py36.txt b/requirements/requirements-py36.txt index 10575bb83..c1eccf831 100644 --- a/requirements/requirements-py36.txt +++ b/requirements/requirements-py36.txt @@ -24,7 +24,7 @@ numpy==1.19.5 # time-series-buffer pandas==1.1.5 # via PyDynamic (setup.py) -pillow==8.3.2 +pillow==8.4.0 # via matplotlib pyparsing==2.4.7 # via matplotlib @@ -32,7 +32,7 @@ python-dateutil==2.8.2 # via # matplotlib # pandas -pytz==2021.1 +pytz==2021.3 # via pandas pywavelets==1.1.1 # via PyDynamic (setup.py) @@ -42,7 +42,7 @@ six==1.16.0 # via # cycler # python-dateutil -sympy==1.8 +sympy==1.9 # via PyDynamic (setup.py) time-series-buffer==0.1.4b0 # via PyDynamic (setup.py) diff --git a/requirements/requirements-py37.txt b/requirements/requirements-py37.txt index 79842ff91..598e81cfb 100644 --- a/requirements/requirements-py37.txt +++ b/requirements/requirements-py37.txt @@ -24,7 +24,7 @@ numpy==1.21.2 # time-series-buffer pandas==1.3.3 # via PyDynamic (setup.py) -pillow==8.3.2 +pillow==8.4.0 # via matplotlib pyparsing==2.4.7 # via matplotlib @@ -32,7 +32,7 @@ python-dateutil==2.8.2 # via # matplotlib # pandas -pytz==2021.1 +pytz==2021.3 # via pandas pywavelets==1.1.1 # via PyDynamic (setup.py) @@ -42,7 +42,7 @@ six==1.16.0 # via # cycler # python-dateutil -sympy==1.8 +sympy==1.9 # via PyDynamic (setup.py) time-series-buffer==0.1.4b0 # via PyDynamic (setup.py) diff --git a/requirements/requirements-py38.txt b/requirements/requirements-py38.txt index e658966d8..3a4f16865 100644 --- a/requirements/requirements-py38.txt +++ b/requirements/requirements-py38.txt @@ -24,7 +24,7 @@ numpy==1.21.2 # time-series-buffer pandas==1.3.3 # via PyDynamic (setup.py) -pillow==8.3.2 +pillow==8.4.0 # via matplotlib pyparsing==2.4.7 # via matplotlib @@ 
-32,7 +32,7 @@ python-dateutil==2.8.2 # via # matplotlib # pandas -pytz==2021.1 +pytz==2021.3 # via pandas pywavelets==1.1.1 # via PyDynamic (setup.py) @@ -42,7 +42,7 @@ six==1.16.0 # via # cycler # python-dateutil -sympy==1.8 +sympy==1.9 # via PyDynamic (setup.py) time-series-buffer==0.1.4b0 # via PyDynamic (setup.py) diff --git a/requirements/requirements-py39.txt b/requirements/requirements-py39.txt index 0a455545e..70e0c9c9c 100644 --- a/requirements/requirements-py39.txt +++ b/requirements/requirements-py39.txt @@ -22,9 +22,9 @@ numpy==1.21.2 # pywavelets # scipy # time-series-buffer -pandas==1.3.2 +pandas==1.3.3 # via PyDynamic (setup.py) -pillow==8.3.2 +pillow==8.4.0 # via matplotlib pyparsing==2.4.7 # via matplotlib @@ -32,7 +32,7 @@ python-dateutil==2.8.2 # via # matplotlib # pandas -pytz==2021.1 +pytz==2021.3 # via pandas pywavelets==1.1.1 # via PyDynamic (setup.py) @@ -42,7 +42,7 @@ six==1.16.0 # via # cycler # python-dateutil -sympy==1.8 +sympy==1.9 # via PyDynamic (setup.py) time-series-buffer==0.1.4b0 # via PyDynamic (setup.py) diff --git a/src/PyDynamic/__init__.py b/src/PyDynamic/__init__.py index 6b576cfce..ebbdd6255 100644 --- a/src/PyDynamic/__init__.py +++ b/src/PyDynamic/__init__.py @@ -9,11 +9,6 @@ __version__ = "1.11.1" __all__ = [ - "invLSFIR", - "invLSIIR", - "invLSFIR_unc", - "invLSFIR_uncMC", - "invLSIIR_unc", "LSFIR", "LSIIR", "fit_som", @@ -76,4 +71,4 @@ from .misc import * from .model_estimation import * -from .uncertainty import * \ No newline at end of file +from .uncertainty import * diff --git a/src/PyDynamic/deconvolution/__init__.py b/src/PyDynamic/deconvolution/__init__.py index 8fdd872db..26785fda9 100644 --- a/src/PyDynamic/deconvolution/__init__.py +++ b/src/PyDynamic/deconvolution/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. deprecated:: 2.0.0 The module *deconvolution* is combined with the module *identification* and @@ -12,18 +11,4 @@ in the module *deconvolution*. 
""" -from .fit_filter import ( - LSFIR, - LSIIR, - LSFIR_unc, - LSFIR_uncMC, - LSIIR_unc, -) - -__all__ = [ - "LSFIR", - "LSIIR", - "LSFIR_unc", - "LSFIR_uncMC", - "LSIIR_unc", -] +from .fit_filter import LSFIR, LSFIR_unc, LSFIR_uncMC, LSIIR, LSIIR_unc diff --git a/src/PyDynamic/deconvolution/fit_filter.py b/src/PyDynamic/deconvolution/fit_filter.py index 5021cb8a1..420f474f4 100755 --- a/src/PyDynamic/deconvolution/fit_filter.py +++ b/src/PyDynamic/deconvolution/fit_filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. deprecated:: 2.0.0 The module *deconvolution* is combined with the module *identification* and @@ -24,14 +23,6 @@ "in the module *deconvolution*. Please use the current function name " ) -__all__ = [ - "LSFIR", - "LSIIR", - "LSFIR_unc", - "LSFIR_uncMC", - "LSIIR_unc", -] - def LSFIR(*args, **kwargs): """.. deprecated:: 2.0.0 Please use :func:`PyDynamic.model_estimation.LSFIR`""" diff --git a/src/PyDynamic/examples/digital_filtering/Design of a digital deconvolution filter (FIR type).ipynb b/src/PyDynamic/examples/digital_filtering/Design of a digital deconvolution filter (FIR type).ipynb new file mode 100644 index 000000000..c10665f0f --- /dev/null +++ b/src/PyDynamic/examples/digital_filtering/Design of a digital deconvolution filter (FIR type).ipynb @@ -0,0 +1,544 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "a2440e75", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import pathlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a86db08", + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import scipy.signal as dsp\n", + "from scipy.linalg import toeplitz\n", + "from scipy.signal import lfilter, lfilter_zi\n", + "\n", + "rst = np.random.RandomState(1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "215f7d02", + "metadata": {}, + "outputs": [], + "source": [ + "from 
PyDynamic.model_estimation.fit_filter import LSFIR\n", + "from PyDynamic.misc.SecondOrderSystem import *\n", + "from PyDynamic.misc.filterstuff import kaiser_lowpass\n", + "from PyDynamic.misc.tools import make_semiposdef, trimOrPad" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2b9a8837", + "metadata": {}, + "outputs": [], + "source": [ + "def legacy_FIRuncFilter(\n", + " y, sigma_noise, theta, Utheta=None, shift=0, blow=None, kind=\"corr\"\n", + "):\n", + " \"\"\"Uncertainty propagation for signal y and uncertain FIR filter theta\n", + "\n", + " A preceding FIR low-pass filter with coefficients `blow` can be provided optionally.\n", + "\n", + " Parameters\n", + " ----------\n", + " y : np.ndarray\n", + " filter input signal\n", + " sigma_noise : float or np.ndarray\n", + " float: standard deviation of white noise in y\n", + " 1D-array: interpretation depends on kind\n", + " theta : np.ndarray\n", + " FIR filter coefficients\n", + " Utheta : np.ndarray, optional\n", + " covariance matrix associated with theta\n", + " if the filter is fully certain, use `Utheta = None` (default) to make use of\n", + " more efficient calculations. See also the comparison given in\n", + " \n", + " shift : int, optional\n", + " time delay of filter output signal (in samples) (defaults to 0)\n", + " blow : np.ndarray, optional\n", + " optional FIR low-pass filter\n", + " kind : string\n", + " only meaningful in combination with sigma_noise a 1D numpy array\n", + " \"diag\": point-wise standard uncertainties of non-stationary white noise\n", + " \"corr\": single sided autocovariance of stationary (colored/correlated)\n", + " noise (default)\n", + "\n", + " Returns\n", + " -------\n", + " x : np.ndarray\n", + " FIR filter output signal\n", + " ux : np.ndarray\n", + " point-wise standard uncertainties associated with x\n", + "\n", + "\n", + " References\n", + " ----------\n", + "\n", + " * Elster and Link 2008 [Elster2008]_\n", + "\n", + " .. 
seealso:: :mod:`PyDynamic.deconvolution.fit_filter`\n", + "\n", + " \"\"\"\n", + "\n", + " Ntheta = len(theta) # FIR filter size\n", + "\n", + " # check which case of sigma_noise is necessary\n", + " if isinstance(sigma_noise, float):\n", + " sigma2 = sigma_noise ** 2\n", + "\n", + " elif isinstance(sigma_noise, np.ndarray) and len(sigma_noise.shape) == 1:\n", + " if kind == \"diag\":\n", + " sigma2 = sigma_noise ** 2\n", + " elif kind == \"corr\":\n", + " sigma2 = sigma_noise\n", + " else:\n", + " raise ValueError(\"unknown kind of sigma_noise\")\n", + "\n", + " else:\n", + " raise ValueError(\n", + " f\"FIRuncFilter: Uncertainty sigma_noise associated \"\n", + " f\"with input signal is expected to be either a float or a 1D array but \"\n", + " f\"is of shape {sigma_noise.shape}. Please check documentation for input \"\n", + " f\"parameters sigma_noise and kind for more information.\"\n", + " )\n", + "\n", + " if isinstance(\n", + " blow, np.ndarray\n", + " ): # calculate low-pass filtered signal and propagate noise\n", + "\n", + " if isinstance(sigma2, float):\n", + " Bcorr = np.correlate(blow, blow, \"full\") # len(Bcorr) == 2*Ntheta - 1\n", + " ycorr = (\n", + " sigma2 * Bcorr[len(blow) - 1 :]\n", + " ) # only the upper half of the correlation is needed\n", + "\n", + " # trim / pad to length Ntheta\n", + " ycorr = trimOrPad(ycorr, Ntheta)\n", + " Ulow = toeplitz(ycorr)\n", + "\n", + " elif isinstance(sigma2, np.ndarray):\n", + "\n", + " if kind == \"diag\":\n", + " # [Leeuw1994](Covariance matrix of ARMA errors in closed form) can be\n", + " # used, to derive this formula\n", + " # The given \"blow\" corresponds to a MA(q)-process.\n", + " # Going through the calculations of Leeuw, but assuming\n", + " # that E(vv^T) is a diagonal matrix with non-identical elements,\n", + " # the covariance matrix V becomes (see Leeuw:corollary1)\n", + " # V = N * SP * N^T + M * S * M^T\n", + " # N, M are defined as in the paper\n", + " # and SP is the covariance of input-noise 
prior to the observed\n", + " # time-interval (SP needs be available len(blow)-time steps into the\n", + " # past. Here it is assumed, that SP is constant with the first value\n", + " # of sigma2)\n", + "\n", + " # V needs to be extended to cover Ntheta-1 time steps more into the past\n", + " sigma2_extended = np.append(sigma2[0] * np.ones((Ntheta - 1)), sigma2)\n", + "\n", + " N = toeplitz(blow[1:][::-1], np.zeros_like(sigma2_extended)).T\n", + " M = toeplitz(\n", + " trimOrPad(blow, len(sigma2_extended)),\n", + " np.zeros_like(sigma2_extended),\n", + " )\n", + " SP = np.diag(sigma2[0] * np.ones_like(blow[1:]))\n", + " S = np.diag(sigma2_extended)\n", + "\n", + " # Ulow is to be sliced from V, see below\n", + " V = N.dot(SP).dot(N.T) + M.dot(S).dot(M.T)\n", + "\n", + " elif kind == \"corr\":\n", + "\n", + " # adjust the lengths sigma2 to fit blow and theta\n", + " # this either crops (unused) information or appends zero-information\n", + " # note1: this is the reason, why Ulow will have dimension\n", + " # (Ntheta x Ntheta) without further ado\n", + "\n", + " # calculate Bcorr\n", + " Bcorr = np.correlate(blow, blow, \"full\")\n", + "\n", + " # pad or crop length of sigma2, then reflect some part to the left and\n", + " # invert the order\n", + " # [0 1 2 3 4 5 6 7] --> [0 0 0 7 6 5 4 3 2 1 0 1 2 3]\n", + " sigma2 = trimOrPad(sigma2, len(blow) + Ntheta - 1)\n", + " sigma2_reflect = np.pad(sigma2, (len(blow) - 1, 0), mode=\"reflect\")\n", + "\n", + " ycorr = np.correlate(\n", + " sigma2_reflect, Bcorr, mode=\"valid\"\n", + " ) # used convolve in a earlier version, should make no difference as\n", + " # Bcorr is symmetric\n", + " Ulow = toeplitz(ycorr)\n", + "\n", + " xlow, _ = lfilter(blow, 1.0, y, zi=y[0] * lfilter_zi(blow, 1.0))\n", + "\n", + " else: # if blow is not provided\n", + " if isinstance(sigma2, float):\n", + " Ulow = np.eye(Ntheta) * sigma2\n", + "\n", + " elif isinstance(sigma2, np.ndarray):\n", + "\n", + " if kind == \"diag\":\n", + " # V needs to be 
extended to cover Ntheta time steps more into the past\n", + " sigma2_extended = np.append(sigma2[0] * np.ones((Ntheta - 1)), sigma2)\n", + "\n", + " # Ulow is to be sliced from V, see below\n", + " V = np.diag(\n", + " sigma2_extended\n", + " ) # this is not Ulow, same thing as in the case of a provided blow\n", + " # (see above)\n", + "\n", + " elif kind == \"corr\":\n", + " Ulow = toeplitz(trimOrPad(sigma2, Ntheta))\n", + "\n", + " xlow = y\n", + "\n", + " # apply FIR filter to calculate best estimate in accordance with GUM\n", + " x, _ = lfilter(theta, 1.0, xlow, zi=xlow[0] * lfilter_zi(theta, 1.0))\n", + " x = np.roll(x, -int(shift))\n", + "\n", + " # add dimension to theta, otherwise transpose won't work\n", + " if len(theta.shape) == 1:\n", + " theta = theta[:, np.newaxis]\n", + "\n", + " # NOTE: In the code below wherever `theta` or `Utheta` get used, they need to be\n", + " # flipped. This is necessary to take the time-order of both variables into\n", + " # account. (Which is descending for `theta` and `Utheta` but ascending for `Ulow`.)\n", + " #\n", + " # Further details and illustrations showing the effect of not-flipping\n", + " # can be found at https://github.com/PTB-M4D/PyDynamic/issues/183\n", + "\n", + " # handle diag-case, where Ulow needs to be sliced from V\n", + " if kind == \"diag\":\n", + " # UncCov needs to be calculated inside in its own for-loop\n", + " # V has dimension (len(sigma2) + Ntheta) * (len(sigma2) + Ntheta) --> slice a\n", + " # fitting Ulow of dimension (Ntheta x Ntheta)\n", + " UncCov = np.zeros((len(sigma2)))\n", + "\n", + " if isinstance(Utheta, np.ndarray):\n", + " for k in range(len(sigma2)):\n", + " Ulow = V[k : k + Ntheta, k : k + Ntheta]\n", + " UncCov[k] = np.squeeze(\n", + " np.flip(theta).T.dot(Ulow.dot(np.flip(theta)))\n", + " + np.abs(np.trace(Ulow.dot(np.flip(Utheta))))\n", + " ) # static part of uncertainty\n", + " else:\n", + " for k in range(len(sigma2)):\n", + " Ulow = V[k : k + Ntheta, k : k + Ntheta]\n", + 
" UncCov[k] = np.squeeze(\n", + " np.flip(theta).T.dot(Ulow.dot(np.flip(theta)))\n", + " ) # static part of uncertainty\n", + "\n", + " else:\n", + " if isinstance(Utheta, np.ndarray):\n", + " UncCov = np.flip(theta).T.dot(Ulow.dot(np.flip(theta))) + np.abs(\n", + " np.trace(Ulow.dot(np.flip(Utheta)))\n", + " ) # static part of uncertainty\n", + " else:\n", + " UncCov = np.flip(theta).T.dot(\n", + " Ulow.dot(np.flip(theta))\n", + " ) # static part of uncertainty\n", + "\n", + " if isinstance(Utheta, np.ndarray):\n", + " unc = np.empty_like(y)\n", + "\n", + " # use extended signal to match assumption of stationary signal prior to first\n", + " # entry\n", + " xlow_extended = np.append(np.full(Ntheta - 1, xlow[0]), xlow)\n", + "\n", + " for m in range(len(xlow)):\n", + " # extract necessary part from input signal\n", + " XL = xlow_extended[m : m + Ntheta, np.newaxis]\n", + " unc[m] = XL.T.dot(np.flip(Utheta).dot(XL)) # apply formula from paper\n", + " else:\n", + " unc = np.zeros_like(y)\n", + "\n", + " ux = np.sqrt(np.abs(UncCov + unc))\n", + " ux = np.roll(ux, -int(shift)) # correct for delay\n", + "\n", + " return x, ux.flatten() # flatten in case that we still have 2D array" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c1ce45d", + "metadata": {}, + "outputs": [], + "source": [ + "# parameters of simulated measurement\n", + "Fs = 500e3\n", + "Ts = 1 / Fs\n", + "\n", + "# sensor/measurement system\n", + "f0 = 36e3\n", + "uf0 = 0.01 * f0\n", + "S0 = 0.4\n", + "uS0 = 0.001 * S0\n", + "delta = 0.01\n", + "udelta = 0.1 * delta\n", + "\n", + "# transform continuous system to digital filter\n", + "bc, ac = sos_phys2filter(S0, delta, f0)\n", + "b, a = dsp.bilinear(bc, ac, Fs)\n", + "\n", + "# Monte Carlo for calculation of unc. assoc. 
with [real(H),imag(H)]\n", + "f = np.linspace(0, 120e3, 200)\n", + "Hfc = sos_FreqResp(S0, delta, f0, f)\n", + "Hf = dsp.freqz(b, a, 2 * np.pi * f / Fs)[1]\n", + "\n", + "runs = 10000\n", + "MCS0 = S0 + rst.randn(runs) * uS0\n", + "MCd = delta + rst.randn(runs) * udelta\n", + "MCf0 = f0 + rst.randn(runs) * uf0\n", + "HMC = np.zeros((runs, len(f)), dtype=complex)\n", + "for k in range(runs):\n", + " bc_, ac_ = sos_phys2filter(MCS0[k], MCd[k], MCf0[k])\n", + " b_, a_ = dsp.bilinear(bc_, ac_, Fs)\n", + " HMC[k, :] = dsp.freqz(b_, a_, 2 * np.pi * f / Fs)[1]\n", + "\n", + "H = np.r_[np.real(Hf), np.imag(Hf)]\n", + "uAbs = np.std(np.abs(HMC), axis=0)\n", + "uPhas = np.std(np.angle(HMC), axis=0)\n", + "UH = np.cov(np.hstack((np.real(HMC), np.imag(HMC))), rowvar=0)\n", + "UH = make_semiposdef(UH)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "98d0f083", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 8))\n", + "plt.errorbar(f * 1e-3, np.abs(Hf), uAbs, fmt=\".\")\n", + "plt.title(\"measured amplitude spectrum with associated uncertainties\")\n", + "plt.xlim(0, 50)\n", + "plt.xlabel(\"frequency / kHz\", fontsize=20)\n", + "plt.ylabel(\"magnitude / au\", fontsize=20)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "712a9be8", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 8))\n", + "plt.errorbar(f * 1e-3, np.angle(Hf), uPhas, fmt=\".\")\n", + "plt.title(\"measured phase spectrum with associated uncertainties\")\n", + "plt.xlim(0, 50)\n", + "plt.xlabel(\"frequency / kHz\", fontsize=20)\n", + "plt.ylabel(\"phase / rad\", fontsize=20)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8dfceebc", + "metadata": {}, + "outputs": [], + "source": [ + "# simulate input and output signals\n", + "time = np.arange(0, 4e-3 - Ts, Ts)\n", + "# x = shocklikeGaussian(time, t0 = 2e-3, sigma = 1e-5, m0=0.8)\n", + "m0 = 0.8\n", + "sigma = 1e-5\n", + "t0 = 2e-3\n", + "x 
= (\n", + " -m0\n", + " * (time - t0)\n", + " / sigma\n", + " * np.exp(0.5)\n", + " * np.exp(-((time - t0) ** 2) / (2 * sigma ** 2))\n", + ")\n", + "y = dsp.lfilter(b, a, x)\n", + "noise = 1e-3\n", + "yn = y + rst.randn(np.size(y)) * noise" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f1db74f", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 8))\n", + "plt.plot(time * 1e3, x, label=\"system input signal\")\n", + "plt.plot(time * 1e3, yn, label=\"measured output signal\")\n", + "plt.legend(fontsize=20)\n", + "plt.xlim(1.8, 4)\n", + "plt.ylim(-1, 1)\n", + "plt.xlabel(\"time / ms\", fontsize=20)\n", + "plt.ylabel(r\"signal amplitude / $m/s^2$\", fontsize=20)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "acd06d3e", + "metadata": {}, + "outputs": [], + "source": [ + "# Calculation of FIR deconvolution filter and its assoc. unc.\n", + "N = 12\n", + "tau = N // 2\n", + "bF, UbF = LSFIR(H, N, tau, f, Fs, UH=UH)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5376d0e9", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 8))\n", + "plt.errorbar(range(N + 1), bF, np.sqrt(np.diag(UbF)), fmt=\"o\")\n", + "plt.xlabel(\"FIR coefficient index\", fontsize=20)\n", + "plt.ylabel(\"FIR coefficient value\", fontsize=20)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dbfe0c57", + "metadata": {}, + "outputs": [], + "source": [ + "fcut = f0 + 10e3\n", + "low_order = 100\n", + "blow, lshift = kaiser_lowpass(low_order, fcut, Fs)\n", + "shift = tau + lshift" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "654bb06d", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 10))\n", + "HbF = (\n", + " dsp.freqz(bF, 1, 2 * np.pi * f / Fs)[1] * dsp.freqz(blow, 1, 2 * np.pi * f / Fs)[1]\n", + ")\n", + "plt.semilogy(f * 1e-3, np.abs(Hf), label=\"measured frequency response\")\n", + "plt.semilogy(f * 
1e-3, np.abs(HbF), label=\"inverse filter\")\n", + "plt.semilogy(f * 1e-3, np.abs(Hf * HbF), label=\"compensation result\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db3ec711", + "metadata": {}, + "outputs": [], + "source": [ + "xhat, Uxhat = legacy_FIRuncFilter(yn, noise, bF, UbF, shift, blow)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "562090fe", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 8))\n", + "plt.plot(time * 1e3, x, label=\"input signal\")\n", + "plt.plot(time * 1e3, yn, label=\"output signal\")\n", + "plt.plot(time * 1e3, xhat, label=\"estimate of input\")\n", + "plt.legend(fontsize=20)\n", + "plt.xlabel(\"time / ms\", fontsize=22)\n", + "plt.ylabel(\"signal amplitude / au\", fontsize=22)\n", + "plt.tick_params(which=\"both\", labelsize=16)\n", + "plt.xlim(1.9, 2.4)\n", + "plt.ylim(-1, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba4632f0", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(16, 10))\n", + "plt.plot(time * 1e3, Uxhat)\n", + "plt.xlabel(\"time / ms\", fontsize=22)\n", + "plt.ylabel(\"signal uncertainty / au\", fontsize=22)\n", + "plt.subplots_adjust(left=0.15, right=0.95)\n", + "plt.tick_params(which=\"both\", labelsize=16)\n", + "plt.xlim(1.9, 2.4)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.9 (PyDynamic)", + "language": "python", + "name": "pydynamic-py39" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.6" + }, + "varInspector": { + "cols": { + "lenName": 16, + "lenType": 16, + "lenVar": 40 + }, + "kernels_config": { + "python": { + "delete_cmd_postfix": "", + "delete_cmd_prefix": "del ", + "library": "var_list.py", + "varRefreshCmd": 
"print(var_dic_list())" + }, + "r": { + "delete_cmd_postfix": ") ", + "delete_cmd_prefix": "rm(", + "library": "var_list.r", + "varRefreshCmd": "cat(var_dic_list()) " + } + }, + "types_to_exclude": [ + "module", + "function", + "builtin_function_or_method", + "instance", + "_Feature" + ], + "window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/src/PyDynamic/examples/digital_filtering/deconv_FIR.py b/src/PyDynamic/examples/digital_filtering/deconv_FIR.py index ad9d5afe3..05ebee122 100644 --- a/src/PyDynamic/examples/digital_filtering/deconv_FIR.py +++ b/src/PyDynamic/examples/digital_filtering/deconv_FIR.py @@ -1,119 +1,140 @@ -# -*- coding: utf-8 -*- -""" - Fit of a FIR filter to a simulated reciprocal frequency response of a - second-order dynamic system. The deconvolution filter is applied to the - simulated response of the system to a shock-like Gaussian signal. - In this example uncertainties associated with the simulated system are - propagated to the estimate of the input signal. +"""Fit of a FIR filter to a simulated reciprocal frequency response +Fit of a FIR filter to a simulated reciprocal frequency response of a +second-order dynamic system. The deconvolution filter is applied to the +simulated response of the system to a shock-like Gaussian signal. +In this example uncertainties associated with the simulated system are +propagated to the estimate of the input signal. 
""" +import matplotlib.pyplot as plt import numpy as np import scipy.signal as dsp -import matplotlib.pyplot as plt import PyDynamic.model_estimation.fit_filter as model_est +from PyDynamic.misc.filterstuff import db, kaiser_lowpass from PyDynamic.misc.SecondOrderSystem import sos_phys2filter from PyDynamic.misc.testsignals import shocklikeGaussian -from PyDynamic.misc.filterstuff import kaiser_lowpass, db -from PyDynamic.uncertainty.propagate_filter import FIRuncFilter from PyDynamic.misc.tools import make_semiposdef +from PyDynamic.uncertainty.propagate_filter import FIRuncFilter rst = np.random.RandomState(10) ##### FIR filter parameters N = 12 # filter order -tau = 6 # time delay +tau = 6 # time delay # parameters of simulated measurement -Fs = 500e3 # sampling frequency (in Hz) -Ts = 1 / Fs # sampling intervall length (in s) +Fs = 500e3 # sampling frequency (in Hz) +Ts = 1 / Fs # sampling intervall length (in s) # sensor/measurement system -f0 = 36e3; uf0 = 0.01*f0 # resonance frequency -S0 = 0.124; uS0= 0.001*S0 # static gain -delta = 0.0055; udelta = 0.1*delta # damping +f0 = 36e3 +uf0 = 0.01 * f0 # resonance frequency +S0 = 0.124 +uS0 = 0.001 * S0 # static gain +delta = 0.0055 +udelta = 0.1 * delta # damping # transform continuous system to digital filter -bc, ac = sos_phys2filter(S0,delta,f0) # calculate analogue filter coefficients -b, a = dsp.bilinear(bc, ac, Fs) # transform to digital filter coefficients +bc, ac = sos_phys2filter(S0, delta, f0) # calculate analogue filter coefficients +b, a = dsp.bilinear(bc, ac, Fs) # transform to digital filter coefficients # simulate input and output signals -time = np.arange(0, 4e-3 - Ts, Ts) # time values -x = shocklikeGaussian(time, t0 = 2e-3, sigma = 1e-5, m0=0.8) # input signal -y = dsp.lfilter(b, a, x) # output signal -noise = 1e-3 # noise std -yn = y + rst.randn(np.size(y)) * noise # add white noise +time = np.arange(0, 4e-3 - Ts, Ts) # time values +x = shocklikeGaussian(time, t0=2e-3, sigma=1e-5, m0=0.8) # 
input signal +y = dsp.lfilter(b, a, x) # output signal +noise = 1e-3 # noise std +yn = y + rst.randn(np.size(y)) * noise # add white noise # Monte Carlo for calculation of unc. assoc. with [real(H),imag(H)] runs = 10000 -MCf0 = f0 + rst.randn(runs)*uf0 # MC draws of resonance frequency -MCS0 = S0 + rst.randn(runs)*uS0 # MC draws of static gain -MCd = delta+ rst.randn(runs)*udelta # MC draws of damping -f = np.linspace(0, 120e3, 200) # frequencies at which to calculate frequency response -HMC = np.zeros((runs, len(f)),dtype=complex) # initialise frequency response with zeros -for k in range(runs): # actual Monte Carlo - bc_,ac_ = sos_phys2filter(MCS0[k], MCd[k], MCf0[k]) # calculate analogue filter coefficients - b_,a_ = dsp.bilinear(bc_,ac_,Fs) # transform to digital filter coefficients - HMC[k,:] = dsp.freqz(b_,a_,2*np.pi*f/Fs)[1] # calculate DFT frequency response - -Hc = np.mean(HMC,dtype=complex,axis=0) # best estimate (complex-valued) -H = np.r_[np.real(Hc), np.imag(Hc)] # best estimate in real, imag -UH= np.cov(np.c_[np.real(HMC),np.imag(HMC)],rowvar=0) # covariance of real, imag -UH= make_semiposdef(UH, verbose=True) # correct for numerical errors - -bF, UbF = model_est.invLSFIR_unc(H,UH,N,tau,f,Fs) # Calculation of FIR deconvolution filter and its assoc. unc. 
-CbF = UbF/(np.tile(np.sqrt(np.diag(UbF))[:,np.newaxis],(1,N+1))* - np.tile(np.sqrt(np.diag(UbF))[:,np.newaxis].T,(N+1,1))) # correlation of filter coefficients +MCf0 = f0 + rst.randn(runs) * uf0 # MC draws of resonance frequency +MCS0 = S0 + rst.randn(runs) * uS0 # MC draws of static gain +MCd = delta + rst.randn(runs) * udelta # MC draws of damping +f = np.linspace(0, 120e3, 200) # frequencies at which to calculate frequency response +HMC = np.zeros( + (runs, len(f)), dtype=complex +) # initialise frequency response with zeros +for k in range(runs): # actual Monte Carlo + bc_, ac_ = sos_phys2filter( + MCS0[k], MCd[k], MCf0[k] + ) # calculate analogue filter coefficients + b_, a_ = dsp.bilinear(bc_, ac_, Fs) # transform to digital filter coefficients + HMC[k, :] = dsp.freqz(b_, a_, 2 * np.pi * f / Fs)[ + 1 + ] # calculate DFT frequency response + +Hc = np.mean(HMC, dtype=complex, axis=0) # best estimate (complex-valued) +H = np.r_[np.real(Hc), np.imag(Hc)] # best estimate in real, imag +UH = np.cov(np.c_[np.real(HMC), np.imag(HMC)], rowvar=0) # covariance of real, imag +UH = make_semiposdef(UH, verbose=True) # correct for numerical errors + +bF, UbF = model_est.LSFIR(H, N, tau, f, Fs, UH=UH) +# Calculation of FIR deconvolution filter and its assoc. unc. 
+CbF = UbF / ( + np.tile(np.sqrt(np.diag(UbF))[:, np.newaxis], (1, N + 1)) + * np.tile(np.sqrt(np.diag(UbF))[:, np.newaxis].T, (N + 1, 1)) +) # correlation of filter coefficients # Deconvolution Step1: lowpass filter for noise attenuation -fcut = f0+20e3; low_order = 100 # cut-off frequency and filter order -blow, lshift = kaiser_lowpass(low_order, fcut, Fs) # FIR low pass filter coefficients -shift = tau + lshift # delay of low-pass plus that of the FIR deconv filter +fcut = f0 + 20e3 +low_order = 100 # cut-off frequency and filter order +blow, lshift = kaiser_lowpass(low_order, fcut, Fs) # FIR low pass filter coefficients +shift = tau + lshift # delay of low-pass plus that of the FIR deconv filter # Deconvolution Step2: Application of deconvolution filter -xhat,Uxhat = FIRuncFilter(yn,noise,bF,UbF,shift,blow) # apply low-pass and FIR deconv filter +xhat, Uxhat = FIRuncFilter( + yn, noise, bF, UbF, shift, blow +) # apply low-pass and FIR deconv filter # Plot of results fplot = np.linspace(0, 80e3, 1000) -Hc = dsp.freqz(b,a, 2*np.pi*fplot/Fs)[1] +Hc = dsp.freqz(b, a, 2 * np.pi * fplot / Fs)[1] Hif = dsp.freqz(bF, 1.0, 2 * np.pi * fplot / Fs)[1] Hl = dsp.freqz(blow, 1.0, 2 * np.pi * fplot / Fs)[1] -plt.figure(1); plt.clf() -plt.plot(fplot*1e-3, db(Hc), fplot*1e-3, db(Hif*Hl), fplot*1e-3, db(Hc*Hif*Hl)) -plt.legend(('System freq. resp.', 'compensation filter','compensation result')) +plt.figure(1) +plt.clf() +plt.plot( + fplot * 1e-3, db(Hc), fplot * 1e-3, db(Hif * Hl), fplot * 1e-3, db(Hc * Hif * Hl) +) +plt.legend(("System freq. 
resp.", "compensation filter", "compensation result")) # plt.title('Amplitude of frequency responses') -plt.xlabel('frequency / kHz',fontsize=22) -plt.ylabel('amplitude / dB',fontsize=22) -plt.tick_params(which="both",labelsize=16) - -fig = plt.figure(2);plt.clf() -ax = fig.add_subplot(1,1,1) -plt.imshow(UbF,interpolation="nearest") +plt.xlabel("frequency / kHz", fontsize=22) +plt.ylabel("amplitude / dB", fontsize=22) +plt.tick_params(which="both", labelsize=16) + +fig = plt.figure(2) +plt.clf() +ax = fig.add_subplot(1, 1, 1) +plt.imshow(UbF, interpolation="nearest") plt.colorbar(ax=ax) # plt.title('Uncertainty of deconvolution filter coefficients') -plt.figure(3); plt.clf() -plt.plot(time*1e3,np.c_[x,yn,xhat]) -plt.legend(('input signal','output signal','estimate of input')) +plt.figure(3) +plt.clf() +plt.plot(time * 1e3, np.c_[x, yn, xhat]) +plt.legend(("input signal", "output signal", "estimate of input")) # plt.title('time domain signals') -plt.xlabel('time / ms',fontsize=22) -plt.ylabel('signal amplitude / au',fontsize=22) -plt.tick_params(which="both",labelsize=16) -plt.xlim(1.5,4) -plt.ylim(-0.41,0.81) - -plt.figure(4);plt.clf() -plt.plot(time*1e3,Uxhat) +plt.xlabel("time / ms", fontsize=22) +plt.ylabel("signal amplitude / au", fontsize=22) +plt.tick_params(which="both", labelsize=16) +plt.xlim(1.5, 4) +plt.ylim(-0.41, 0.81) + +plt.figure(4) +plt.clf() +plt.plot(time * 1e3, Uxhat) # plt.title('Uncertainty of estimated input signal') -plt.xlabel('time / ms',fontsize=22) -plt.ylabel('signal uncertainty / au',fontsize=22) -plt.subplots_adjust(left=0.15,right=0.95) -plt.tick_params(which='both', labelsize=16) -plt.xlim(1.5,4) - -plt.figure(5);plt.clf() -plt.errorbar(range(N+1), bF, np.sqrt(np.diag(UbF))) +plt.xlabel("time / ms", fontsize=22) +plt.ylabel("signal uncertainty / au", fontsize=22) +plt.subplots_adjust(left=0.15, right=0.95) +plt.tick_params(which="both", labelsize=16) +plt.xlim(1.5, 4) + +plt.figure(5) +plt.clf() +plt.errorbar(range(N + 1), bF, 
np.sqrt(np.diag(UbF))) plt.show() diff --git a/src/PyDynamic/examples/digital_filtering/identify_IIR.py b/src/PyDynamic/examples/digital_filtering/identify_IIR.py index 8b32131fc..cd90003ee 100644 --- a/src/PyDynamic/examples/digital_filtering/identify_IIR.py +++ b/src/PyDynamic/examples/digital_filtering/identify_IIR.py @@ -26,7 +26,7 @@ Fs = 500e3 # sampling frequency Na = 4; Nb = 4 # IIR filter order (Na - denominator, Nb - numerator) -b, a, tau = fit_filter.LSIIR(Hvals, Na, Nb, f, Fs) # fit IIR filter to freq response +b, a, tau = fit_filter.LSIIR(Hvals, Na, Nb, f, Fs) # fit IIR filter to freq response fplot = np.linspace(0, 80e3, 1000) # frequency range for the plot Hc = sos_FreqResp(S0, delta, f0, fplot) # frequency response of the 2nd order system diff --git a/src/PyDynamic/misc/filterstuff.py b/src/PyDynamic/misc/filterstuff.py index c719425fb..77e098dda 100644 --- a/src/PyDynamic/misc/filterstuff.py +++ b/src/PyDynamic/misc/filterstuff.py @@ -13,7 +13,7 @@ to the unit circle * :func:`kaiser_lowpass`: Design of a FIR lowpass filter using the window technique with a Kaiser window. -* :func:`isstable`: Determine whether a given IIR filter is stable +* :func:`isstable`: Determine whether an IIR filter with certain coefficients is stable * :func:`savitzky_golay`: Smooth (and optionally differentiate) data with a Savitzky-Golay filter @@ -33,7 +33,7 @@ def db(vals): - """Calculation of decibel values :math:`20\log_{10}(x)` for a vector of values""" + r"""Calculation of decibel values :math:`20\log_{10}(x)` for a vector of values""" return 20 * np.log10(np.abs(vals)) @@ -146,29 +146,35 @@ def kaiser_lowpass(L, fcut, Fs, beta=8.0): def isstable(b, a, ftype="digital"): - """Determine whether `IIR filter (b,a)` is stable + """Determine whether an IIR filter with certain coefficients is stable - Determine whether `IIR filter (b,a)` is stable by checking roots of the - polynomial ´a´. 
+ Determine whether IIR filter with coefficients `b` and `a` is stable by checking + the roots of the polynomial `a`. Parameters ---------- - b: ndarray - filter numerator coefficients - a: ndarray - filter denominator coefficients - ftype: string - type of filter (`digital` or `analog`) + b : ndarray + Filter numerator coefficients. These are only part of the input + parameters for compatibility reasons (especially with MATLAB code). During the + computation they are actually not used. + a : ndarray + Filter denominator coefficients. + ftype : string, optional + Filter type. 'digital' if in discrete-time (default) and 'analog' if in + continuous-time. + Returns ------- - stable: bool - whether filter is stable or not - + stable : bool + Whether filter is stable or not. """ v = np.roots(a) + # Check if all of the roots of the polynomial made from the filter coefficients... if ftype == "digital": + # ... lie inside the unit circle in discrete time. return not np.any(np.abs(v) > 1.0) elif ftype == "analog": + # ... are non-negative in continuous time. return not np.any(np.real(v) < 0) else: raise ValueError(f"isstable: filter type 'ftype={ftype}'unknown. Please " diff --git a/src/PyDynamic/misc/tools.py b/src/PyDynamic/misc/tools.py index 9eaddf99c..a4239da3e 100644 --- a/src/PyDynamic/misc/tools.py +++ b/src/PyDynamic/misc/tools.py @@ -324,7 +324,7 @@ def FreqResp2RealImag( def make_equidistant(*args, **kwargs): """ .. 
deprecated:: 2.0.0 - Please use :func:`PyDynamic.uncertainty.interpolate.interp1d_unc` + Please use :func:`PyDynamic.uncertainty.interpolate.interp1d_unc` """ raise DeprecationWarning( "The method `PyDynamic.misc.tools.make_equidistant` is moved " diff --git a/src/PyDynamic/model_estimation/__init__.py b/src/PyDynamic/model_estimation/__init__.py index 74b699a42..52d4e6e74 100644 --- a/src/PyDynamic/model_estimation/__init__.py +++ b/src/PyDynamic/model_estimation/__init__.py @@ -1,5 +1,5 @@ -# -*- coding: utf-8 -*- -""" +"""Fit IIR and FIR filters and identify transfer function models with uncertainties + The package :doc:`PyDynamic.model_estimation` implements methods for the model estimation by least-squares fitting and the identification of transfer function models with associated uncertainties. @@ -11,18 +11,7 @@ -uptake-of-nmi-calibrations-of-dynamic-force-torque-and/>`_ - `GitHub website `_ """ +__all__ = ["LSIIR", "LSFIR", "fit_som"] -from .fit_filter import (LSFIR, LSIIR, invLSFIR, invLSFIR_unc, invLSFIR_uncMC, - invLSIIR, invLSIIR_unc) +from .fit_filter import LSFIR, LSIIR from .fit_transfer import fit_som - -__all__ = [ - "LSFIR", - "LSIIR", - "invLSFIR", - "invLSIIR", - "invLSFIR_unc", - "invLSFIR_uncMC", - "invLSIIR_unc", - "fit_som", -] diff --git a/src/PyDynamic/model_estimation/fit_filter.py b/src/PyDynamic/model_estimation/fit_filter.py index e18c27538..230916650 100644 --- a/src/PyDynamic/model_estimation/fit_filter.py +++ b/src/PyDynamic/model_estimation/fit_filter.py @@ -1,669 +1,1050 @@ -# -*- coding: utf-8 -*- -""" -The module :mod:`PyDynamic.model_estimation.fit_filter` contains several functions to -carry out a least-squares fit to a given complex frequency response and the design of -digital deconvolution filters by least-squares fitting to the reciprocal of a given -frequency response each with associated uncertainties. 
+"""This module assists in carrying out least-squares IIR and FIR filter fits + +It is possible to carry out a least-squares fit of digital, time-discrete IIR and FIR +filters to a given complex frequency response and the design of digital deconvolution +filters by least-squares fitting to the reciprocal of a given frequency response each +with propagation of associated uncertainties. This module contains the following functions: -* :func:`LSIIR`: Least-squares IIR filter fit to a given frequency response -* :func:`LSFIR`: Least-squares fit of a digital FIR filter to a given frequency - response -* :func:`invLSFIR`: Least-squares fit of a digital FIR filter to the reciprocal of a - given frequency response. -* :func:`invLSFIR_unc`: Design of FIR filter as fit to reciprocal of frequency response - values with uncertainty -* :func:`invLSFIR_uncMC`: Design of FIR filter as fit to reciprocal of frequency - response values with uncertainty via Monte Carlo -* :func:`invLSIIR`: Design of a stable IIR filter as fit to reciprocal of frequency - response values -* :func:`invLSIIR_unc`: Design of a stable IIR filter as fit to reciprocal of frequency - response values with uncertainty +* :func:`LSIIR`: Least-squares (time-discrete) IIR filter fit to a given frequency + response or its reciprocal optionally propagating uncertainties. +* :func:`LSFIR`: Least-squares (time-discrete) FIR filter fit to a given + frequency response or its reciprocal optionally propagating uncertainties either + via Monte Carlo or via a singular-value decomposition and linear matrix propagation. 
""" -import warnings + +__all__ = ["LSFIR", "LSIIR"] + +import inspect +from enum import Enum +from typing import Optional, Tuple, Union import numpy as np import scipy.signal as dsp +from scipy.stats import multivariate_normal -from ..misc.filterstuff import grpdelay, mapinside +from ..misc.filterstuff import grpdelay, isstable, mapinside +from ..misc.tools import ( + is_2d_matrix, + is_2d_square_matrix, + number_of_rows_equals_vector_dim, +) -__all__ = [ - "LSIIR", - "LSFIR", - "invLSFIR", - "invLSFIR_unc", - "invLSIIR", - "invLSIIR_unc", - "invLSFIR_uncMC" - ] - -def _fitIIR( - Hvals: np.ndarray, - tau: int, - w: np.ndarray, - E: np.ndarray, - Na: int, +def LSIIR( + H: np.ndarray, Nb: int, - inv: bool = False - ): - """The actual fitting routing for the least-squares IIR filter. + Na: int, + f: np.ndarray, + Fs: float, + tau: Optional[int] = 0, + verbose: Optional[bool] = True, + max_stab_iter: Optional[int] = 50, + inv: Optional[bool] = False, + UH: Optional[np.ndarray] = None, + mc_runs: Optional[int] = 1000, +) -> Tuple[np.ndarray, np.ndarray, int, Union[np.ndarray, None]]: + """Least-squares (time-discrete) IIR filter fit to frequency response or reciprocal + + For fitting an IIR filter model to the reciprocal of the frequency response values + or directly to the frequency response values provided by the user, this method + uses a least-squares fit to determine an estimate of the filter coefficients. The + filter then optionally is stabilized by pole mapping and introduction of a time + delay. Associated uncertainties are optionally propagated when provided using the + GUM S2 Monte Carlo method. 
Parameters ---------- - Hvals : (M,) np.ndarray - (complex) frequency response values - tau : integer - initial estimate of time delay - w : np.ndarray - :math:`2 * np.pi * f / Fs` - E : np.ndarray - :math:`np.exp(-1j * np.dot(w[:, np.newaxis], Ns.T))` - Nb : int - numerator polynomial order - Na : int - denominator polynomial order - inv : bool, optional - If True the least-squares fitting is performed for the reciprocal, if False - for the actual frequency response + H : array_like of shape (M,) + (Complex) frequency response values. + Nb : int + Order of IIR numerator polynomial. + Na : int + Order of IIR denominator polynomial. + f : array_like of shape (M,) + Frequencies at which H is given. + Fs : float + Sampling frequency for digital IIR filter. + tau : int, optional + Initial estimate of time delay for obtaining a stable filter (default = 0). + verbose : bool, optional + If True (default) be more talkative on stdout. Otherwise no output is written + anywhere. + max_stab_iter : int, optional + Maximum count of iterations for stabilizing the resulting filter. If no + stabilization should be carried out, this parameter can be set to 0 (default = + 50). This parameter replaced the previous `justFit` which was dropped in + PyDynamic 2.0.0. + inv : bool, optional + If False (default) apply the fit to the frequency response values directly, + otherwise fit to the reciprocal of the frequency response values. + UH : array_like of shape (2M, 2M), optional + Uncertainties associated with real and imaginary part of H. + mc_runs : int, optional + Number of Monte Carlo runs (default = 1000). Only used if uncertainties + UH are provided. 
Returns ------- - b, a : IIR filter coefficients as numpy arrays - """ - exponent = -1 if inv else 1 - Ea = E[:, 1:Na + 1] - Eb = E[:, :Nb + 1] - Htau = np.exp(-1j * w * tau) * Hvals ** exponent - HEa = np.dot(np.diag(Htau), Ea) - D = np.hstack((HEa, -Eb)) - Tmp1 = np.real(np.dot(np.conj(D.T), D)) - Tmp2 = np.real(np.dot(np.conj(D.T), -Htau)) - ab = np.linalg.lstsq(Tmp1, Tmp2, rcond=None)[0] - a = np.hstack((1.0, ab[:Na])) - b = ab[Na:] - return b, a - - -def LSIIR(Hvals, Nb, Na, f, Fs, tau=0, justFit=False): - """Least-squares IIR filter fit to a given frequency response. - - This method uses Gauss-Newton non-linear optimization and pole - mapping for filter stabilization - - Parameters - ---------- - Hvals: numpy array of (complex) frequency response values of shape (M,) - Nb: integer numerator polynomial order - Na: integer denominator polynomial order - f: numpy array of frequencies at which Hvals is given of shape - (M,) - Fs: sampling frequency - tau: integer initial estimate of time delay - justFit: boolean, when true then no stabilization is carried out - - Returns - ------- - b,a: IIR filter coefficients as numpy arrays - tau: filter time delay in samples + b : np.ndarray + The IIR filter numerator coefficient vector in a 1-D sequence. + a : np.ndarray + The IIR filter denominator coefficient vector in a 1-D sequence. + tau : int + Filter time delay (in samples). + Uab : np.ndarray of shape (Nb+Na+1, Nb+Na+1) + Uncertainties associated with `[a[1:],b]`. Will be None if UH is not + provided or is None. References ---------- * Eichstädt et al. 2010 [Eichst2010]_ * Vuerinckx et al. 1996 [Vuer1996]_ + .. 
seealso:: :func:`PyDynamic.uncertainty.propagate_filter.IIRuncFilter` """ + if _no_uncertainties_were_provided(UH): + freq_resp_to_fit, mc_runs = H, 1 + else: + freq_resp_to_fit = _assemble_complex_from_real_imag( + _draw_multivariate_monte_carlo_samples( + _assemble_real_imag_from_complex(H), UH, mc_runs + ) + ) - print("\nLeast-squares fit of an order %d digital IIR filter" % max(Nb, Na)) - print("to a frequency response given by %d values.\n" % len(Hvals)) - - w = 2 * np.pi * f / Fs - Ns = np.arange(0, max(Nb, Na) + 1)[:, np.newaxis] - E = np.exp(-1j * np.dot(w[:, np.newaxis], Ns.T)) + if verbose: + _print_iir_welcome_msg(H, Na, Nb, UH, inv, mc_runs) - b, a = _fitIIR(Hvals, tau, w, E, Na, Nb, inv=False) + warn_unstable_msg = "CAUTION - The algorithm did NOT result in a stable IIR filter!" - if justFit: - print("Calculation done. No stabilization requested.") - if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: - print("Obtained filter is NOT stable.") - sos = np.sum( - np.abs((dsp.freqz(b, a, 2 * np.pi * f / Fs)[1] - Hvals) ** 2)) - print("Final sum of squares = %e" % sos) - tau = 0 - return b, a, tau - - if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: - stable = False + # Prepare frequencies, fitting and stabilization parameters. 
+ omega = _compute_radial_freqs_equals_two_pi_times_freqs_over_sampling_freq(Fs, f) + Ns = np.arange(0, max(Nb, Na) + 1)[:, np.newaxis] + E = np.exp(-1j * np.dot(omega[:, np.newaxis], Ns.T)) + as_and_bs = np.empty((mc_runs, Nb + Na + 1)) + taus = np.empty((mc_runs,), dtype=int) + tau_max = tau + stab_iters = np.zeros((mc_runs,), dtype=int) + if tau == 0 and max_stab_iter == 0: + relevant_filters_mask = np.ones((mc_runs,), dtype=bool) else: - stable = True - - maxiter = 50 - - astab = mapinside(a) - run = 1 + relevant_filters_mask = np.zeros((mc_runs,), dtype=bool) - while stable is not True and run < maxiter: - g1 = grpdelay(b, a, Fs)[0] - g2 = grpdelay(b, astab, Fs)[0] - tau = np.ceil(tau + np.median(g2 - g1)) + for mc_run in range(mc_runs): + b_i, a_i = _compute_actual_iir_least_squares_fit( + freq_resp_to_fit, tau, omega, E, Na, Nb, inv + ) - b, a = _fitIIR(Hvals, tau, w, E, Na, Nb, inv=False) - if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: - astab = mapinside(a) + current_stabilization_iteration_counter = 1 + if isstable(b_i, a_i, "digital"): + relevant_filters_mask[mc_run] = True + taus[mc_run] = tau else: - stable = True - run = run + 1 - - if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: - print("Caution: The algorithm did NOT result in a stable IIR filter!") - print( - "Maybe try again with a higher value of tau0 or a higher filter " - "order?") + # If the filter by now is unstable we already tried once to stabilize with + # initial estimate of time delay and we should iterate at least once. So now + # we try with previously required maximum time delay to obtain stability. + if tau_max > tau: + b_i, a_i = _compute_actual_iir_least_squares_fit( + freq_resp_to_fit, tau_max, omega, E, Na, Nb, inv + ) + current_stabilization_iteration_counter += 1 + + if isstable(b_i, a_i, "digital"): + relevant_filters_mask[mc_run] = True + + # Set the either needed delay for reaching stability or the initial + # delay to start iterations. 
+ taus[mc_run] = tau_max + + while ( + not relevant_filters_mask[mc_run] + and current_stabilization_iteration_counter < max_stab_iter + ): + ( + b_i, + a_i, + taus[mc_run], + relevant_filters_mask[mc_run], + ) = _compute_stabilized_filter_through_time_delay_iteration( + b_i, a_i, taus[mc_run], omega, E, freq_resp_to_fit, Nb, Na, Fs, inv + ) + current_stabilization_iteration_counter += 1 + else: + if taus[mc_run] > tau_max: + tau_max = taus[mc_run] + if verbose: + sos = np.sum( + np.abs((dsp.freqz(b_i, a_i, omega)[1] - freq_resp_to_fit) ** 2) + ) + print( + f"LSIIR: Fitting{'' if UH is None else f' for MC run {mc_run}'}" + f" finished. Conducted " + f"{current_stabilization_iteration_counter} attempts to " + f"stabilize filter. " + f"{'' if relevant_filters_mask[mc_run] else warn_unstable_msg} " + f"Final sum of squares = {sos}" + ) + + # Finally store stacked filter parameters. + as_and_bs[mc_run, :] = np.hstack((a_i[1:], b_i)) + stab_iters[mc_run] = current_stabilization_iteration_counter + + # If we actually ran Monte Carlo simulation we compute the resulting filter. + if mc_runs > 1: + # If we did not find any stable filter, calculate the final result from all + # filters. + if not np.any(relevant_filters_mask): + relevant_filters_mask = np.ones_like(relevant_filters_mask) + b_res = np.mean(as_and_bs[relevant_filters_mask, Na:], axis=0) + a_res = np.hstack( + (np.array([1.0]), np.mean(as_and_bs[relevant_filters_mask, :Na], axis=0)) + ) + stab_iter_mean = np.mean(stab_iters[relevant_filters_mask]) + + final_stabilization_iteration_counter = 1 + + # Determine if the resulting filter already is stable and if not stabilize with + # an initial delay of the previous maximum delay. 
+ if not isstable(b_res, a_res, "digital"): + final_tau = tau_max + b_res, a_res = _compute_actual_iir_least_squares_fit( + freq_resp_to_fit, final_tau, omega, E, Na, Nb, inv + ) + final_stabilization_iteration_counter += 1 + + final_stable = isstable(b_res, a_res, "digital") + + while ( + not final_stable and final_stabilization_iteration_counter < max_stab_iter + ): + # Compute appropriate time delay for the stabilization of the resulting + # filter. + ( + b_res, + a_res, + final_tau, + final_stable, + ) = _compute_stabilized_filter_through_time_delay_iteration( + b_res, a_res, final_tau, omega, E, freq_resp_to_fit, Nb, Na, Fs, inv + ) + + final_stabilization_iteration_counter += 1 + else: + # If we did not conduct Monte Carlo simulation, we just gather final results. + b_res = b_i + a_res = a_i + stab_iter_mean = ( + final_stabilization_iteration_counter + ) = current_stabilization_iteration_counter + final_stable = relevant_filters_mask[0] + final_tau = taus[0] + if verbose: + if not final_stable: + final_stabilization_msg = ( + f"and with {final_stabilization_iteration_counter} for the final " + f"filter " + if final_stabilization_iteration_counter != stab_iter_mean + else "" + ) + print( + f"LSIIR: {warn_unstable_msg} Maybe try again with a higher value of " + f"tau or a higher filter order? Least squares fit finished after " + f"{stab_iter_mean} stabilization iterations " + f"{f'on average ' if mc_runs > 1 else ''}{final_stabilization_msg}" + f"(final tau = {final_tau})." 
+ ) + Hd = _compute_delayed_filters_freq_resp_via_scipys_freqz( + b_res, a_res, tau, omega + ) + residuals_real_imag = _assemble_real_imag_from_complex(Hd - freq_resp_to_fit) + _compute_and_print_rms(residuals_real_imag) + + if UH: + Uab = np.cov(as_and_bs, rowvar=False) + return b_res, a_res, final_tau, Uab + return b_res, a_res, final_tau, None + + +def _print_iir_welcome_msg( + H: np.ndarray, Na: int, Nb: int, UH: np.ndarray, inv: bool, mc_runs: int +): + monte_carlo_message = ( + f" Uncertainties of the filter coefficients are " + f"evaluated using the GUM S2 Monte Carlo method " + f"with {mc_runs} runs." + ) print( - "Least squares fit finished after %d iterations (tau=%d)." % (run, tau)) - Hd = dsp.freqz(b, a, 2 * np.pi * f / Fs)[1] - Hd = Hd * np.exp(1j * 2 * np.pi * f / Fs * tau) - res = np.hstack( - (np.real(Hd) - np.real(Hvals), np.imag(Hd) - np.imag(Hvals))) - rms = np.sqrt(np.sum(res ** 2) / len(f)) - print("Final rms error = %e \n\n" % rms) - - return b, a, int(tau) - - -def LSFIR(H, N, tau, f, Fs, Wt=None): - """ - Least-squares fit of a digital FIR filter to a given frequency response. 
+ f"LSIIR: Least-squares fit of an order {max(Nb, Na)} digital IIR filter to" + f"{' the reciprocal of' if inv else ''} a frequency response " + f"given by {len(H)} values.{monte_carlo_message if UH else ''}" + ) - Parameters - ---------- - H : (complex) frequency response values of shape (M,) - N : FIR filter order - tau : delay of filter - f : frequencies of shape (M,) - Fs : sampling frequency of digital filter - Wt : (optional) vector of weights of shape (M,) or shape (M,M) - Returns - ------- - filter coefficients bFIR (ndarray) of shape (N+1,) +def _compute_radial_freqs_equals_two_pi_times_freqs_over_sampling_freq( + sampling_freq: float, freqs: np.ndarray +) -> np.ndarray: + return 2 * np.pi * freqs / sampling_freq - """ - print("\nLeast-squares fit of an order %d digital FIR filter to the" % N) - print("reciprocal of a frequency response given by %d values.\n" % len(H)) +def _compute_actual_iir_least_squares_fit( + H: np.ndarray, + tau: int, + omega: np.ndarray, + E: np.ndarray, + Na: int, + Nb: int, + inv: bool = False, +) -> Tuple[np.ndarray, np.ndarray]: + if inv and np.all(H == 0): + raise ValueError( + f"{_get_first_public_caller()}: It is not possible to compute the " + f"reciprocal of zero but the provided frequency response" + f"{'s are constant ' if len(H) > 1 else 'is '} zero. Please " + f"provide other frequency responses Hvals." 
+ ) + Ea = E[:, 1 : Na + 1] + Eb = E[:, : Nb + 1] + e_to_the_minus_one_j_omega_tau = _compute_e_to_the_one_j_omega_tau(-omega, tau) + delayed_freq_resp_or_recipr = e_to_the_minus_one_j_omega_tau * ( + np.reciprocal(H) if inv else H + ) + HEa = np.dot(np.diag(delayed_freq_resp_or_recipr), Ea) + D = np.hstack((HEa, -Eb)) + Tmp1 = np.real(np.dot(np.conj(D.T), D)) + Tmp2 = np.real(np.dot(np.conj(D.T), -delayed_freq_resp_or_recipr)) + ab = _fit_filter_coeffs_via_least_squares(Tmp1, Tmp2) + a = np.hstack((1.0, ab[:Na])) + b = ab[Na:] + return b, a - H = H[:, np.newaxis] - w = 2 * np.pi * f / Fs - w = w[:, np.newaxis] +def _compute_stabilized_filter_through_time_delay_iteration( + b: np.ndarray, + a: np.ndarray, + tau: int, + w: np.ndarray, + E: np.ndarray, + H: np.ndarray, + Nb: int, + Na: int, + Fs: float, + inv: Optional[bool] = False, +) -> Tuple[np.ndarray, np.ndarray, float, bool]: + tau += _compute_filter_stabilization_time_delay(b, a, Fs) - ords = np.arange(N + 1)[:, np.newaxis] - ords = ords.T + b, a = _compute_one_filter_stabilization_iteration_through_time_delay( + H, tau, w, E, Nb, Na, inv + ) - E = np.exp(-1j * np.dot(w, ords)) + return b, a, tau, isstable(b, a, "digital") - if Wt is not None: - if len(np.shape(Wt)) == 2: # is matrix - weights = np.diag(Wt) - else: - weights = np.eye(len(f)) * Wt - X = np.vstack( - [np.real(np.dot(weights, E)), np.imag(np.dot(weights, E))]) - else: - X = np.vstack([np.real(E), np.imag(E)]) - H = H * np.exp(1j * w * tau) - iRI = np.vstack([np.real(1.0 / H), np.imag(1.0 / H)]) +def _compute_filter_stabilization_time_delay( + b: np.ndarray, + a: np.ndarray, + Fs: float, +) -> int: + a_stabilized = mapinside(a) + g_1 = grpdelay(b, a, Fs)[0] + g_2 = grpdelay(b, a_stabilized, Fs)[0] + return np.ceil(np.median(g_2 - g_1)) - bFIR, res = np.linalg.lstsq(X, iRI)[:2] - if not isinstance(res, np.ndarray): - print( - "Calculation of FIR filter coefficients finished with residual " - "norm %e" % res) +def 
_compute_one_filter_stabilization_iteration_through_time_delay( + H: np.ndarray, + tau: int, + w: np.ndarray, + E: np.ndarray, + Nb: int, + Na: int, + inv: Optional[bool] = False, +) -> Tuple[np.ndarray, np.ndarray]: + return _compute_actual_iir_least_squares_fit(H, tau, w, E, Na, Nb, inv) - return np.reshape(bFIR, (N + 1,)) -def invLSFIR(H, N, tau, f, Fs, Wt=None): - """ Least-squares fit of a digital FIR filter to the reciprocal of a given - frequency response. +def _compute_delayed_filters_freq_resp_via_scipys_freqz( + b: np.ndarray, + a: Union[float, np.ndarray], + tau: int, + omega: np.ndarray, +) -> np.ndarray: + filters_freq_resp = dsp.freqz(b, a, omega)[1] + delayed_filters_freq_resp = filters_freq_resp * _compute_e_to_the_one_j_omega_tau( + omega, tau + ) + return delayed_filters_freq_resp - Parameters - ---------- - H: np.ndarray of shape (M,) and dtype complex - frequency response values - N: int - FIR filter order - tau: float - delay of filter - f: np.ndarray of shape (M,) - frequencies - Fs: float - sampling frequency of digital filter - Wt: np.ndarray of shape (M,) - optional - vector of weights - Returns - ------- - bFIR: np.ndarray of shape (N,) - filter coefficients +def _assemble_complex_from_real_imag(array: np.ndarray) -> np.ndarray: + if is_2d_matrix(array): + array_split_in_two_half = ( + _split_array_of_monte_carlo_samples_of_real_and_imag_parts(array) + ) + else: + array_split_in_two_half = _split_vector_of_real_and_imag_parts(array) + return array_split_in_two_half[0] + 1j * array_split_in_two_half[1] - References - ---------- - * Elster and Link [Elster2008]_ - .. 
see_also ::mod::`PyDynamic.uncertainty.propagate_filter.FIRuncFilter` +def _split_array_of_monte_carlo_samples_of_real_and_imag_parts( + array: np.ndarray, +) -> np.ndarray: + return np.split(ary=array, indices_or_sections=2, axis=1) - """ - print("\nLeast-squares fit of an order %d digital FIR filter to the" % N) - print("reciprocal of a frequency response given by %d values.\n" % len(H)) +def _split_vector_of_real_and_imag_parts(vector: np.ndarray) -> np.ndarray: + return np.split(ary=vector, indices_or_sections=2) - H = H[:, np.newaxis] # extend to matrix-like for simplified algebra - w = 2 * np.pi * f / Fs # set up radial frequencies - w = w[:, np.newaxis] +def _compute_x( + filter_order: int, + freqs: np.ndarray, + sampling_freq: float, + weights: np.ndarray, +): + e = np.exp( + -1j + * 2 + * np.pi + * np.dot( + freqs[:, np.newaxis] / sampling_freq, + np.arange(filter_order + 1)[:, np.newaxis].T, + ) + ) + x = np.vstack((np.real(e), np.imag(e))) + x = np.dot(np.diag(weights), x) + return x - ords = np.arange(N + 1)[:, np.newaxis] # set up design matrix - ords = ords.T - E = np.exp(-1j * np.dot(w, ords)) +def _assemble_real_imag_from_complex(array: np.ndarray) -> np.ndarray: + return np.hstack((np.real(array), np.imag(array))) - if Wt is not None: # set up weighted design matrix if necessary - if len(np.shape(Wt)) == 2: # is matrix - weights = np.diag(Wt) - else: - weights = np.eye(len(f)) * Wt - X = np.vstack([np.real(np.dot(weights, E)), np.imag(np.dot(weights, E))]) - else: - X = np.vstack([np.real(E), np.imag(E)]) - Hs = H * np.exp(1j * w * tau) # apply time delay for improved fit quality - iRI = np.vstack([np.real(1.0 / Hs), np.imag(1.0 / Hs)]) +def _compute_and_print_rms(residuals_real_imag: np.ndarray) -> np.ndarray: + rms = np.sqrt(np.sum(residuals_real_imag ** 2) / (len(residuals_real_imag) // 2)) + print( + f"{_get_first_public_caller()}: Calculation of filter coefficients finished. 
" + f"Final rms error = {rms}" + ) + return rms - bFIR, res = np.linalg.lstsq(X, iRI)[:2] # the actual fitting - if (not isinstance(res, np.ndarray)) or (len(res) == 1): # summarise results - print( - "Calculation of FIR filter coefficients finished with residual " - "norm %e" % res - ) - Hd = dsp.freqz(bFIR, 1, 2 * np.pi * f / Fs)[1] - Hd = Hd * np.exp(1j * 2 * np.pi * f / Fs * tau) - res = np.hstack((np.real(Hd) - np.real(H), np.imag(Hd) - np.imag(H))) - rms = np.sqrt(np.sum(res ** 2) / len(f)) - print("Final rms error = %e \n\n" % rms) +def invLSIIR(H, Nb, Na, f, Fs, tau, justFit=False, verbose=True): + """Least-squares IIR filter fit to the reciprocal of given frequency response values - return bFIR.flatten() + This essentially is a wrapper for a call of :func:`LSIIR` with the according + parameter set. + """ + if justFit: + return LSIIR(H, Nb, Na, f, Fs, tau, verbose, 0, True) + return LSIIR(H, Nb, Na, f, Fs, tau, verbose, 50, True) -def invLSFIR_unc(H, UH, N, tau, f, Fs, wt=None, verbose=True, trunc_svd_tol=None): - """Design of FIR filter as fit to reciprocal of frequency response values - with uncertainty +def invLSIIR_unc( + H: np.ndarray, + UH: np.ndarray, + Nb: int, + Na: int, + f: np.ndarray, + Fs: float, + tau: int = 0, +) -> Tuple[np.ndarray, np.ndarray, int, Optional[np.ndarray]]: + """Stable IIR filter as fit to reciprocal of frequency response with uncertainty + + This essentially is a wrapper for a call of :func:`LSIIR` with the according + parameter set. + """ + print( + f"invLSIIR_unc: Least-squares fit of an order {max(Nb, Na)} digital IIR " + f"filter to the reciprocal of a frequency response given by {len(H)} " + f"values. Uncertainties of the filter coefficients are evaluated using " + "the GUM S2 Monte Carlo method with 1000 runs." 
+ ) + return LSIIR(H, Nb, Na, f, Fs, tau, False, 50, True, UH, 1000) - Least-squares fit of a digital FIR filter to the reciprocal of a - frequency response - for which associated uncertainties are given for its real and imaginary - part. - Uncertainties are propagated using a truncated svd and linear matrix - propagation. +def LSFIR( + H: np.ndarray, + N: int, + f: np.ndarray, + Fs: float, + tau: int, + weights: Optional[np.ndarray] = None, + verbose: Optional[bool] = True, + inv: Optional[bool] = False, + UH: Optional[np.ndarray] = None, + mc_runs: Optional[int] = None, + trunc_svd_tol: Optional[float] = None, +) -> Tuple[np.ndarray, np.ndarray]: + """Design of FIR filter as fit to freq. resp. or its reciprocal with uncertainties + + Least-squares fit of a (time-discrete) digital FIR filter to the reciprocal of the + frequency response values or actual frequency response values for which + associated uncertainties are given for its real and imaginary part. Uncertainties + are propagated either using a Monte Carlo method if mc_runs is provided as + integer greater than one or otherwise using a truncated singular-value + decomposition and linear matrix propagation. The Monte Carlo approach may help in + cases where the weighting matrix or the Jacobian are ill-conditioned, resulting + in false uncertainties associated with the filter coefficients. + + .. note:: Uncertainty propagation via singular-value decomposition is not yet + implemented, when fitting to the actual frequency response and not its + reciprocal. Alternatively specify the number mc_runs of runs to propagate the + uncertainties via the Monte Carlo method. 
Parameters ---------- - H: np.ndarray of shape (M,) - frequency response values - UH: np.ndarray of shape (2M,2M) - uncertainties associated with the real and imaginary part - N: int - FIR filter order - tau: float - delay of filter - f: np.ndarray of shape (M,) - frequencies - Fs: float - sampling frequency of digital filter - wt: np.ndarray of shape (2M,) - optional - array of weights for a weighted least-squares method (default = None - results in no weighting) - verbose: bool, optional - whether to print statements to the command line (default = True) - trunc_svd_tol: float, optional - lower bound for singular values to be considered for pseudo-inverse + H : array_like of shape (M,) or (2M,) + (Complex) frequency response values in dtype complex or as a vector + containing the real parts in the first half followed by the imaginary parts + N : int + FIR filter order + f : array_like of shape (M,) + frequencies at which H is given + Fs : float + sampling frequency of digital FIR filter + tau : int + time delay in samples for improved fitting + weights : array_like of shape (2M,), optional + vector of weights for a weighted least-squares method (default results in no + weighting) + verbose: bool, optional + If True (default) verbose output is printed to the command line + inv : bool, optional + If False (default) apply the fit to the frequency response values directly, + otherwise fit to the reciprocal of the frequency response values + UH : array_like of shape (2M,2M), optional + uncertainties associated with the real and imaginary part of H + mc_runs : int, optional + Number of Monte Carlo runs greater than one. Only used, if uncertainties + associated with the real and imaginary part of H are provided. Only one of + mc_runs and trunc_svd_tol can be provided. + trunc_svd_tol : float, optional + Lower bound for singular values to be considered for pseudo-inverse. Values + smaller than this threshold are considered zero. Defaults to zero. 
Only one of + mc_runs and trunc_svd_tol can be provided. Returns ------- - b: np.ndarray of shape (N+1,) - filter coefficients of shape - Ub: np.ndarray of shape (N+1,N+1) - uncertainties associated with b + b : array_like of shape (N+1,) + The FIR filter coefficient vector in a 1-D sequence + Ub : array_like of shape (N+1,N+1) + Uncertainties associated with b. Will be None if UH is not + provided or is None. + + Raises + ------ + NotImplementedError + The least-squares fitting of a digital FIR filter to a frequency response H + with propagation of associated uncertainties using a truncated singular-value + decomposition and linear matrix propagation is not yet implemented. + Alternatively specify the number mc_runs of runs to propagate the uncertainties + via the Monte Carlo method. References ---------- - * Elster and Link [Elster2008]_ - """ + * Elster and Link [Elster2008]_ + .. seealso:: :func:`PyDynamic.uncertainty.propagate_filter.FIRuncFilter` + """ + ( + freq_resps_real_imag, + freqs, + mc_runs, + propagation_method, + sampling_freq, + weights, + ) = _validate_and_prepare_fir_inputs( + Fs, H, UH, f, inv, mc_runs, trunc_svd_tol, weights + ) if verbose: - print("\nLeast-squares fit of an order %d digital FIR filter to the" % N) - print("reciprocal of a frequency response given by %d values" % len(H)) - print("and propagation of associated uncertainties.") - - # Step 1: Propagation of uncertainties to reciprocal of frequency response - runs = 10000 - Nf = len(f) - - if not len(H) == UH.shape[0]: - # Assume that H is given as complex valued frequency response. 
- RI = np.hstack((np.real(H), np.imag(H))) - else: - RI = H.copy() - H = H[:Nf] + 1j * H[Nf:] - HRI = np.random.multivariate_normal(RI, UH, runs) # random draws of real,imag of - # freq response values - omtau = 2 * np.pi * f / Fs * tau - - # Vectorized Monte Carlo for propagation to inverse - absHMC = HRI[:, :Nf] ** 2 + HRI[:, Nf:] ** 2 - HiMC = np.hstack(((HRI[:, :Nf] * np.tile(np.cos(omtau), (runs, 1)) + - HRI[:, Nf:] * np.tile(np.sin(omtau), (runs, 1))) / - absHMC, - (HRI[:, Nf:] * np.tile(np.cos(omtau), (runs, 1)) - - HRI[:, :Nf] * np.tile(np.sin(omtau), (runs, 1))) / - absHMC)) - UiH = np.cov(HiMC, rowvar=False) - - # Step 2: Fit filter coefficients and evaluate uncertainties - if isinstance(wt, np.ndarray): - if wt.shape != np.diag(UiH).shape[0]: - raise ValueError("invLSFIR_unc: User-defined weighting has wrong " - "dimension. wt is expected to be of length " - f"{2 * Nf} but is of length {wt.shape}.") + _print_fir_welcome_msg(H, N, inv, mc_runs, propagation_method, trunc_svd_tol) + omega, delayed_freq_resp_real_imag_or_recipr, x = _prepare_common_fitting_inputs( + N, freq_resps_real_imag, freqs, inv, sampling_freq, tau, weights + ) + if propagation_method == _PropagationMethod.NONE: + b_fir = _fit_filter_coeffs_via_least_squares( + x, delayed_freq_resp_real_imag_or_recipr + ) + Ub_fir = None else: - wt = np.ones(2 * Nf) - - E = np.exp(-1j * 2 * np.pi * np.dot(f[:, np.newaxis] / Fs, - np.arange(N + 1)[:, np.newaxis].T)) - X = np.vstack((np.real(E), np.imag(E))) - X = np.dot(np.diag(wt), X) - Hm = H * np.exp(1j * 2 * np.pi * f / Fs * tau) - Hri = np.hstack((np.real(1.0 / Hm), np.imag(1.0 / Hm))) - - u, s, v = np.linalg.svd(X, full_matrices=False) - if isinstance(trunc_svd_tol, float): - s[s < trunc_svd_tol] = 0.0 - StSInv = np.zeros_like(s) - StSInv[s > 0] = s[s > 0] ** (-2) - - M = np.dot(np.dot(np.dot(v.T, np.diag(StSInv)), np.diag(s)), u.T) - - bFIR = np.dot(M, Hri[:, np.newaxis]) # actual fitting - UbFIR = np.dot(np.dot(M, UiH), M.T) # evaluation of 
uncertainties + mc_freq_resps_real_imag = _draw_multivariate_monte_carlo_samples( + vector=freq_resps_real_imag, covariance_matrix=UH, mc_runs=mc_runs + ) + if propagation_method == _PropagationMethod.MC: + b_fir, Ub_fir = _fit_fir_filter_with_uncertainty_propagation_via_mc( + inv, mc_freq_resps_real_imag, omega, tau, x + ) + else: + b_fir, Ub_fir = _fit_fir_filter_with_uncertainty_propagation_via_svd( + mc_freq_resps_real_imag, + mc_runs, + omega, + delayed_freq_resp_real_imag_or_recipr, + tau, + trunc_svd_tol, + x, + ) + if verbose: + _print_fir_result_msg(b_fir, freq_resps_real_imag, inv, omega, tau) + return b_fir, Ub_fir + + +class _PropagationMethod(Enum): + NONE = 0 + MC = 1 + SVD = 2 + + +def _validate_and_prepare_fir_inputs( + sampling_freq: float, + H: np.ndarray, + UH: np.ndarray, + freqs: np.ndarray, + inv: bool, + mc_runs: int, + trunc_svd_tol: float, + weights: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray, int, _PropagationMethod, float, np.ndarray]: + n_freqs = len(freqs) + two_n_freqs = 2 * n_freqs + sampling_freq = sampling_freq + weights = _validate_and_return_weights(weights, expected_len=two_n_freqs) + freq_resps_real_imag = _validate_and_assemble_freq_resps( + H, + expected_len_when_complex=n_freqs, + expected_len_when_real_imag=two_n_freqs, + ) + _validate_uncertainties(vector=freq_resps_real_imag, covariance_matrix=UH) + _validate_fir_uncertainty_propagation_method_related_inputs( + UH, inv, mc_runs, trunc_svd_tol + ) + propagation_method, mc_runs = _determine_fir_propagation_method(UH, mc_runs) + return ( + freq_resps_real_imag, + freqs, + mc_runs, + propagation_method, + sampling_freq, + weights, + ) - bFIR = bFIR.flatten() - if verbose: - Hd = dsp.freqz(bFIR, 1, 2 * np.pi * f / Fs)[1] - Hd = Hd * np.exp(1j * 2 * np.pi * f / Fs * tau) - res = np.hstack((np.real(Hd) - np.real(H), np.imag(Hd) - np.imag(H))) - rms = np.sqrt(np.sum(res ** 2) / len(f)) - print("Final rms error = %e \n\n" % rms) +def _validate_and_return_weights(weights: 
np.ndarray, expected_len: int) -> np.ndarray: + if weights is not None: + if not isinstance(weights, np.ndarray): + raise TypeError( + f"{_get_first_public_caller()}: User-defined weighting has wrong " + "type. wt is expected to be a NumPy ndarray but is of type " + f"{type(weights)}.", + ) + if len(weights) != expected_len: + raise ValueError( + f"{_get_first_public_caller()}: User-defined weighting has wrong " + "dimension. wt is expected to be of length " + f"{expected_len} but is of length {len(weights)}." + ) + return weights + return np.ones(expected_len) + + +def _get_first_public_caller() -> str: + for caller in inspect.stack(): + if "PyDynamic" in caller.filename and caller.function[0] != "_": + return caller.function + return inspect.stack()[1].function + + +def _validate_and_assemble_freq_resps( + H: np.ndarray, + expected_len_when_complex: int, + expected_len_when_real_imag: int, +) -> np.ndarray: + if _is_dtype_complex(H): + _validate_length_of_h(H, expected_length=expected_len_when_complex) + return _assemble_real_imag_from_complex(H) + _validate_length_of_h(H, expected_length=expected_len_when_real_imag) + return H + + +def _is_dtype_complex(array: np.ndarray) -> bool: + return array.dtype == np.complexfloating + + +def _validate_length_of_h(H: np.ndarray, expected_length: int): + if not len(H) == expected_length: + raise ValueError( + f"{_get_first_public_caller()}: vector of complex frequency responses " + f"is expected to contain {expected_length} elements, corresponding to the " + f"number of frequencies, but actually contains {len(H)} " + f"elements. Please adjust either of the two inputs." 
+ ) - return bFIR, UbFIR +def _validate_uncertainties(vector: np.ndarray, covariance_matrix: np.ndarray): + if not _no_uncertainties_were_provided(covariance_matrix): + _validate_vector_and_corresponding_uncertainties_dims( + vector=vector, covariance_matrix=covariance_matrix + ) -def invLSFIR_uncMC(H, UH, N, tau, f, Fs, wt=None, verbose=True): - """Design of FIR filter as fit to reciprocal of frequency response values - with uncertainty - Least-squares fit of a FIR filter to the reciprocal of a frequency response - for which associated uncertainties are given for its real and imaginary - parts. - Uncertainties are propagated using a Monte Carlo method. This method may - help in cases where - the weighting matrix or the Jacobian are ill-conditioned, resulting in - false uncertainties - associated with the filter coefficients. +def _no_uncertainties_were_provided(covariance_matrix: Union[np.ndarray, None]) -> bool: + return covariance_matrix is None - Parameters - ---------- - H: np.ndarray of shape (M,) and dtype complex - frequency response values - UH: np.ndarray of shape (2M,2M) - uncertainties associated with the real and imaginary part of H - N: int - FIR filter order - tau: int - time delay of filter in samples - f: np.ndarray of shape (M,) - frequencies corresponding to H - Fs: float - sampling frequency of digital filter - wt: np.ndarray of shape (2M,) - optional - array of weights for a weighted least-squares method (default = None - results in no weighting) - verbose: bool, optional - whether to print statements to the command line (default = True) - Returns - ------- - b: np.ndarray of shape (N+1,) - filter coefficients of shape - Ub: np.ndarray of shape (N+1, N+1) - uncertainties associated with b +def _validate_vector_and_corresponding_uncertainties_dims( + vector: np.ndarray, covariance_matrix: Union[np.ndarray] +): + if not isinstance(covariance_matrix, np.ndarray): + raise TypeError( + f"{_get_first_public_caller()}: if uncertainties are provided, " 
+ f"they are expected to be of type np.ndarray, but uncertainties are of type" + f" {type(covariance_matrix)}." + ) + if not number_of_rows_equals_vector_dim(matrix=covariance_matrix, vector=vector): + raise ValueError( + f"{_get_first_public_caller()}: number of rows of uncertainties and " + f"number of elements of values are expected to match. But {len(vector)} " + f"values and {covariance_matrix.shape} uncertainties were provided. Please " + f"adjust either the values or their corresponding uncertainties." + ) + if not is_2d_square_matrix(covariance_matrix): + raise ValueError( + f"{_get_first_public_caller()}: uncertainties are expected to be " + f"provided in a square matrix shape but are of shape " + f"{covariance_matrix.shape}." + ) - References - ---------- - * Elster and Link [Elster2008]_ - """ - if verbose: - print("\nLeast-squares fit of an order %d digital FIR filter to the" % N) - print("reciprocal of a frequency response given by %d values" % len(H)) - print("and propagation of associated uncertainties.") - - # Step 1: Propagation of uncertainties to reciprocal of frequency response - runs = 10000 - HRI = np.random.multivariate_normal(np.hstack((np.real(H), np.imag(H))), UH, runs) - - # Step 2: Fitting the filter coefficients - Nf = len(f) - if isinstance(wt, np.ndarray): - if wt.shape != 2 * Nf: - raise ValueError("invLSFIR_uncMC: User-defined weighting has wrong " - "dimension. 
wt is expected to be of length " - f"{2 * Nf} but is of length {wt.shape}.") - else: - wt = np.ones(2 * Nf) +def _validate_fir_uncertainty_propagation_method_related_inputs( + covariance_matrix: Union[np.ndarray, None], + inv: bool, + mc_runs: Union[int, None], + trunc_svd_tol: Union[float, None], +): + def _are_we_supposed_to_apply_method_on_freq_resp_directly() -> bool: + return not inv + + def _both_propagation_methods_simultaneously_requested() -> bool: + return _input_for_svd_was_provided( + trunc_svd_tol + ) and _number_of_monte_carlo_runs_was_provided(mc_runs) + + def _are_we_supposed_to_fit_freq_resp_with_svd_propagation() -> bool: + return ( + _input_for_svd_was_provided(trunc_svd_tol) + or not _number_of_monte_carlo_runs_was_provided(mc_runs) + ) and _are_we_supposed_to_apply_method_on_freq_resp_directly() + + def _number_of_mc_runs_too_small(): + return mc_runs == 1 + + if _no_uncertainties_were_provided(covariance_matrix): + if _number_of_monte_carlo_runs_was_provided(mc_runs): + raise ValueError( + f"\n{_get_first_public_caller()}: The least-squares fitting of a " + f"digital FIR filter " + "to a frequency response H with propagation of associated " + f"uncertainties via the Monte Carlo method requires that uncertainties " + f"are provided via input parameter UH. No uncertainties were given " + f"but number of Monte Carlo runs set to {mc_runs}. Either remove " + f"mc_runs or provide uncertainties." + ) + if _input_for_svd_was_provided(trunc_svd_tol): + raise ValueError( + f"\n{_get_first_public_caller()}: The least-squares fitting of a " + f"digital FIR filter " + "to a frequency response H with propagation of associated " + "uncertainties via a truncated singular-value decomposition and linear " + "matrix propagation requires that uncertainties are provided via " + "input parameter UH. No uncertainties were given but lower bound for " + f"singular values trunc_svd_tol={trunc_svd_tol}. Either remove " + "trunc_svd_tol or provide uncertainties." 
+ ) + elif _both_propagation_methods_simultaneously_requested(): + raise ValueError( + f"\n{_get_first_public_caller()}: Only one of mc_runs and trunc_svd_tol " + f"can be " + f"provided but mc_runs={mc_runs} and trunc_svd_tol={trunc_svd_tol}." + ) + elif _are_we_supposed_to_fit_freq_resp_with_svd_propagation(): + raise NotImplementedError( + f"\n{_get_first_public_caller()}: The least-squares fitting of a digital " + f"FIR filter to a frequency response H with propagation of associated " + f"uncertainties using a truncated singular-value decomposition and linear " + f"matrix propagation is not yet implemented. Alternatively specify " + f"the number mc_runs of runs to propagate the uncertainties via the " + f"Monte Carlo method." + ) + elif _number_of_mc_runs_too_small(): + raise ValueError( + f"\n{_get_first_public_caller()}: Number of Monte Carlo runs is expected " + f"to be greater than 1 but mc_runs={mc_runs}. Please provide a greater " + f"number of runs or switch to propagation of uncertainties " + f"via singular-value decomposition by leaving out mc_runs." 
+ ) - E = np.exp(-1j * 2 * np.pi * np.dot(f[:, np.newaxis] / Fs, - np.arange(N + 1)[:, np.newaxis].T)) - X = np.vstack((np.real(E), np.imag(E))) - X = np.dot(np.diag(wt), X) - bF = np.zeros((N + 1, runs)) - resn = np.zeros((runs,)) - for k in range(runs): - Hk = HRI[k, :Nf] + 1j * HRI[k, Nf:] - Hkt = Hk * np.exp(1j * 2 * np.pi * f / Fs * tau) - iRI = np.hstack([np.real(1.0 / Hkt), np.imag(1.0 / Hkt)]) - bF[:, k], res = np.linalg.lstsq(X, iRI)[:2] - resn[k] = np.linalg.norm(res) - bFIR = np.mean(bF, axis=1) - UbFIR = np.cov(bF, rowvar=True) +def _number_of_monte_carlo_runs_was_provided(mc_runs: Union[int, None]) -> bool: + return bool(mc_runs) - return bFIR, UbFIR +def _input_for_svd_was_provided(trunc_svd_tol: Union[float, None]) -> bool: + return trunc_svd_tol is not None -def invLSIIR(Hvals, Nb, Na, f, Fs, tau, justFit=False, verbose=True): - """Design of a stable IIR filter as fit to reciprocal of frequency - response values - Least-squares fit of a digital IIR filter to the reciprocal of a given set - of frequency response values using the equation-error method and - stabilization - by pole mapping and introduction of a time delay. +def _determine_fir_propagation_method( + covariance_matrix: Union[np.ndarray, None], + mc_runs: Union[int, None], +) -> Tuple[_PropagationMethod, Union[int, None]]: + if _no_uncertainties_were_provided(covariance_matrix): + return _PropagationMethod.NONE, None + if _number_of_monte_carlo_runs_was_provided(mc_runs): + return _PropagationMethod.MC, mc_runs + return _PropagationMethod.SVD, 10000 - Parameters - ---------- - Hvals: np.ndarray of shape (M,) and dtype complex - frequency response values. - Nb: int - order of IIR numerator polynomial. - Na: int - order of IIR denominator polynomial. - f: np.ndarray of shape (M,) - frequencies corresponding to Hvals - Fs: float - sampling frequency for digital IIR filter. - tau: float - initial estimate of time delay for filter stabilization. 
- justFit: bool - if True then no stabilization is carried out. - verbose: bool - If True print some more detail about input parameters. - Returns - ------- - b : np.ndarray - The IIR filter numerator coefficient vector in a 1-D sequence - a : np.ndarray - The IIR filter denominator coefficient vector in a 1-D sequence - tau : int - time delay (in samples) +def _print_fir_welcome_msg( + freq_resp_in_provided_shape, + filter_order, + inv, + mc_runs, + propagation_method, + trunc_svd_tol, +): + if propagation_method != _PropagationMethod.NONE: + if propagation_method == _PropagationMethod.MC: + method_specific_propagation_msg = f"Monte Carlo method with {mc_runs} runs" + else: + method_specific_propagation_msg = ( + f"truncated singular-value decomposition and linear matrix " + f"propagation with {trunc_svd_tol} as lower bound for the singular " + f"values to be considered for the pseudo-inverse" + ) + propagation_msg = ( + ". The frequency response's associated uncertainties are propagated " + "via a " + method_specific_propagation_msg + ) + else: + propagation_msg = " without propagation of associated uncertainties" + print( + f"\nLSFIR: Least-squares fit of an order {filter_order} digital FIR " + f"filter to{' the reciprocal of' if inv else ''} a frequency response given by " + f"{len(freq_resp_in_provided_shape)} values{propagation_msg}." 
+ ) - References - ---------- - * Eichstädt, Elster, Esward, Hessling [Eichst2010]_ - """ - from numpy import count_nonzero, roots, ceil, median +def _prepare_common_fitting_inputs( + filter_order: int, + freq_resps_real_imag: np.ndarray, + freqs: np.ndarray, + inv: bool, + sampling_freq: float, + tau: int, + weights: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + x = _compute_x(filter_order, freqs, sampling_freq, weights) + omega = _compute_radial_freqs_equals_two_pi_times_freqs_over_sampling_freq( + sampling_freq, freqs + ) + complex_freq_resp = _assemble_complex_from_real_imag(freq_resps_real_imag) + delayed_complex_freq_resp = complex_freq_resp * _compute_e_to_the_one_j_omega_tau( + omega, tau + ) + delayed_freq_resp_real_imag_or_recipr = ( + _assemble_delayed_freq_resp_real_imag_or_recipr(delayed_complex_freq_resp, inv) + ) + return omega, delayed_freq_resp_real_imag_or_recipr, x - if verbose: - print( - "\nLeast-squares fit of an order %d digital IIR filter to the" % max(Nb, Na) - ) - print("reciprocal of a frequency response given by %d values.\n" % len(Hvals)) - w = 2 * np.pi * f / Fs - Ns = np.arange(0, max(Nb, Na) + 1)[:, np.newaxis] - E = np.exp(-1j * np.dot(w[:, np.newaxis], Ns.T)) +def _compute_e_to_the_one_j_omega_tau(omega: np.ndarray, tau: int) -> np.ndarray: + return np.exp(1j * omega * tau) - bi, ai = _fitIIR(Hvals, tau, w, E, Na, Nb, inv=True) - if justFit: # no uncertainty evaluation - return bi, ai +def _assemble_delayed_freq_resp_real_imag_or_recipr( + delayed_complex_freq_resp: np.ndarray, inv: bool +) -> np.ndarray: + return _assemble_real_imag_from_complex( + np.reciprocal(delayed_complex_freq_resp) if inv else delayed_complex_freq_resp + ) - if count_nonzero(abs(roots(ai)) > 1) > 0: - stable = False - else: - stable = True - maxiter = 50 +def _fit_filter_coeffs_via_least_squares( + x: np.ndarray, delayed_freq_resp_real_imag_or_recipr: np.ndarray +) -> np.ndarray: + return np.linalg.lstsq(x, 
delayed_freq_resp_real_imag_or_recipr, rcond=None)[0] - astab = mapinside(ai) # stabilise filter - run = 1 - while stable is not True and run < maxiter: # shift delay such that filter - # becomes stable - g1 = grpdelay(bi, ai, Fs)[0] - g2 = grpdelay(bi, astab, Fs)[0] - tau = ceil(tau + median(g2 - g1)) +def _draw_multivariate_monte_carlo_samples( + vector: np.ndarray, covariance_matrix: np.ndarray, mc_runs: int +) -> np.ndarray: + return multivariate_normal.rvs(mean=vector, cov=covariance_matrix, size=mc_runs) - bi, ai = _fitIIR(Hvals, tau, w, E, Na, Nb, inv=True) - if count_nonzero(abs(roots(ai)) > 1) > 0: - astab = mapinside(ai) - else: - stable = True - run = run + 1 - if count_nonzero(abs(roots(ai)) > 1) > 0 and verbose: - print("Caution: The algorithm did NOT result in a stable IIR filter!") - print( - "Maybe try again with a higher value of tau0 or a higher filter order?" +def _fit_fir_filter_with_uncertainty_propagation_via_mc( + inv: bool, + mc_freq_resps_real_imag: np.ndarray, + omega: np.ndarray, + tau: int, + x: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + mc_delayed_freq_resps_or_recipr_real_imag = ( + _compute_mc_delayed_freq_resps_or_reciprs_real_imag( + inv, mc_freq_resps_real_imag, omega, tau ) + ) + return _conduct_fir_uncertainty_propagation_via_mc( + mc_delayed_freq_resps_or_recipr_real_imag, x + ) - if verbose: - print("Least squares fit finished after %d iterations (tau=%d).\n" % (run, tau)) - Hd = dsp.freqz(bi, ai, 2 * np.pi * f / Fs)[1] - Hd = Hd * np.exp(1j * 2 * np.pi * f / Fs * tau) - res = np.hstack((np.real(Hd) - np.real(Hvals), np.imag(Hd) - np.imag(Hvals))) - rms = np.sqrt(np.sum(res ** 2) / len(f)) - print("Final rms error = %e \n\n" % rms) - return bi, ai, int(tau) +def _compute_mc_delayed_freq_resps_or_reciprs_real_imag( + inv: bool, mc_freq_resps_real_imag: np.ndarray, omega: np.ndarray, tau: int +) -> np.ndarray: + mc_complex_freq_resps = _assemble_complex_from_real_imag(mc_freq_resps_real_imag) + 
mc_delayed_complex_freq_resps = ( + mc_complex_freq_resps * _compute_e_to_the_one_j_omega_tau(omega, tau) + ) + return _assemble_delayed_freq_resp_real_imag_or_recipr( + mc_delayed_complex_freq_resps, inv + ) -def invLSIIR_unc(H, UH, Nb, Na, f, Fs, tau=0): - """Design of stabel IIR filter as fit to reciprocal of given frequency - response with uncertainty +def _conduct_fir_uncertainty_propagation_via_mc( + mc_freq_resps_real_imag: np.ndarray, x: np.ndarray +) -> Tuple[np.ndarray, np.ndarray]: + mc_b_firs = np.array( + [ + _fit_filter_coeffs_via_least_squares(x, mc_freq_resp) + for mc_freq_resp in mc_freq_resps_real_imag + ] + ).T + b_fir = np.mean(mc_b_firs, axis=1) + Ub_fir = np.cov(mc_b_firs, rowvar=True) + return b_fir, Ub_fir + + +def _fit_fir_filter_with_uncertainty_propagation_via_svd( + mc_freq_resps_real_imag: np.ndarray, + mc_runs: int, + omega: np.ndarray, + preprocessed_freq_resp: np.ndarray, + tau: int, + trunc_svd_tol: float, + x: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + list_of_mc_freq_resps_real_and_imag = ( + _split_array_of_monte_carlo_samples_of_real_and_imag_parts( + mc_freq_resps_real_imag + ) + ) + mc_freq_resps_real = list_of_mc_freq_resps_real_and_imag[0] + mc_freq_resps_imag = list_of_mc_freq_resps_real_and_imag[1] + recipr_of_sqr_abs_of_mc_freq_resps = np.reciprocal( + mc_freq_resps_real ** 2 + mc_freq_resps_imag ** 2 + ) + omega_tau = omega * tau + cos_omega_tau = np.tile(np.cos(omega_tau), (mc_runs, 1)) + sin_omega_tau = np.tile(np.sin(omega_tau), (mc_runs, 1)) + UiH = np.cov( + np.hstack( + ( + ( + mc_freq_resps_real * cos_omega_tau + + mc_freq_resps_imag * sin_omega_tau + ) + * recipr_of_sqr_abs_of_mc_freq_resps, + ( + mc_freq_resps_imag * cos_omega_tau + - mc_freq_resps_real * sin_omega_tau + ) + * recipr_of_sqr_abs_of_mc_freq_resps, + ) + ), + rowvar=False, + ) + u, s, v = np.linalg.svd(x, full_matrices=False) + if _input_for_svd_was_provided(trunc_svd_tol): + s[s < trunc_svd_tol] = 0.0 + StSInv = np.zeros_like(s) + StSInv[s 
> 0] = s[s > 0] ** (-2) + M = np.dot(np.dot(np.dot(v.T, np.diag(StSInv)), np.diag(s)), u.T) + b_fir = np.dot(M, preprocessed_freq_resp[:, np.newaxis]).flatten() + Ub_fir = np.dot(np.dot(M, UiH), M.T) + return b_fir, Ub_fir - Least-squares fit of a digital IIR filter to the reciprocal of a given set - of frequency response values with given associated uncertainty. - Propagation of uncertainties is - carried out using the Monte Carlo method. - Parameters - ---------- +def _print_fir_result_msg( + b_fir: np.ndarray, + freq_resps_real_imag: np.ndarray, + inv: bool, + omega: np.ndarray, + tau: int, +): + complex_h = _assemble_complex_from_real_imag(freq_resps_real_imag) + original_values = np.reciprocal(complex_h) if inv else complex_h + delayed_filters_freq_resp = _compute_delayed_filters_freq_resp_via_scipys_freqz( + b_fir, 1.0, tau, omega + ) + complex_residuals = delayed_filters_freq_resp - original_values + residuals_real_imag = _assemble_real_imag_from_complex(complex_residuals) + _compute_and_print_rms(residuals_real_imag) - H: np.ndarray of shape (M,) and dtype complex - frequency response values. - UH: np.ndarray of shape (2M,2M) - uncertainties associated with real and imaginary part of H - Nb: int - order of IIR numerator polynomial. - Na: int - order of IIR denominator polynomial. - f: np.ndarray of shape (M,) - frequencies corresponding to H - Fs: float - sampling frequency for digital IIR filter. - tau: float - initial estimate of time delay for filter stabilization. - Returns - ------- - b,a: np.ndarray - IIR filter coefficients - tau: int - time delay (in samples) - Uba: np.ndarray of shape (Nb+Na+1, Nb+Na+1) - uncertainties associated with [a[1:],b] +def invLSFIR( + H: np.ndarray, + N: int, + tau: int, + f: np.ndarray, + Fs: float, + Wt: Optional[np.ndarray] = None, +) -> np.ndarray: + """Least-squares (time-discrete) digital FIR filter fit to freq. resp. 
reciprocal + + This essentially is a wrapper for a call of :func:`LSFIR` with the according + parameter set. + """ + print( + f"invLSFIR: Least-squares fit of an order {N} digital FIR filter to the " + f"reciprocal of a frequency response H given by {len(H)} values.\n" + ) + return LSFIR(H=H, N=N, f=f, Fs=Fs, tau=tau, weights=Wt, verbose=False, inv=True)[0] - References - ---------- - * Eichstädt, Elster, Esward and Hessling [Eichst2010]_ - .. seealso:: :mod:`PyDynamic.uncertainty.propagate_filter.IIRuncFilter` - :mod:`PyDynamic.model_estimation.fit_filter.invLSIIR` +def invLSFIR_unc( + H: np.ndarray, + UH: np.ndarray, + N: int, + tau: int, + f: np.ndarray, + Fs: float, + wt: Optional[np.ndarray] = None, + verbose: Optional[bool] = True, + trunc_svd_tol: Optional[float] = None, +) -> Tuple[np.ndarray, np.ndarray]: + """Design of FIR filter as fit to the reciprocal of a freq. resp. with uncertainties + + This essentially is a wrapper for a call of :func:`LSFIR` with the according + parameter set. 
""" + return LSFIR(H, N, f, Fs, tau, wt, verbose, True, UH, trunc_svd_tol=trunc_svd_tol) - runs = 1000 - - print("\nLeast-squares fit of an order %d digital IIR filter to the" % max(Nb, Na)) - print("reciprocal of a frequency response given by %d values.\n" % len(H)) - print( - "Uncertainties of the filter coefficients are evaluated using\n" - "the GUM S2 Monte Carlo method with %d runs.\n" % runs - ) - # Step 1: Propagation of uncertainties to frequency response - HRI = np.random.multivariate_normal(np.hstack((np.real(H), np.imag(H))), UH, runs) - HH = HRI[:, : len(f)] + 1j * HRI[:, len(f) :] - - # Step 2: Fit filter and evaluate uncertainties (Monte Carlo method) - AB = np.zeros((runs, Nb + Na + 1)) - Tau = np.zeros((runs,)) - for k in range(runs): - bi, ai, Tau[k] = invLSIIR(HH[k, :], Nb, Na, f, Fs, tau, verbose=False) - AB[k, :] = np.hstack((ai[1:], bi)) - - bi = np.mean(AB[:, Na:], axis=0) - ai = np.hstack((np.array([1.0]), np.mean(AB[:, :Na], axis=0))) - Uab = np.cov(AB, rowvar=False) - tau = np.mean(Tau) - return bi, ai, tau, Uab +def invLSFIR_uncMC( + H: np.ndarray, + UH: np.ndarray, + N: int, + tau: int, + f: np.ndarray, + Fs: float, + verbose: Optional[bool] = True, + mc_runs: Optional[int] = 10000, +) -> Tuple[np.ndarray, np.ndarray]: + """Design of FIR filter as fit to the reciprocal of a freq. resp. with uncertainties + + This essentially is a wrapper for a call of :func:`LSFIR` with the according + parameter set. 
+ """ + return LSFIR(H, N, f, Fs, tau, verbose=verbose, inv=True, UH=UH, mc_runs=mc_runs) diff --git a/src/PyDynamic/uncertainty/interpolate.py b/src/PyDynamic/uncertainty/interpolate.py index c151ded11..b3c3a5e04 100644 --- a/src/PyDynamic/uncertainty/interpolate.py +++ b/src/PyDynamic/uncertainty/interpolate.py @@ -1,20 +1,22 @@ -# -*- coding: utf-8 -*- -""" +"""This module assists in uncertainty propagation for 1-dimensional interpolation + The :mod:`PyDynamic.uncertainty.interpolate` module implements methods for the propagation of uncertainties in the application of standard interpolation methods as provided by :class:`scipy.interpolate.interp1d`. -This module contains the following function: +This module contains the following functions: * :func:`interp1d_unc`: Interpolate arbitrary time series considering the associated uncertainties +* :func:`make_equidistant`: Interpolate a 1-D function equidistantly considering + associated uncertainties + """ +__all__ = ["interp1d_unc", "make_equidistant"] from typing import Optional, Tuple, Union import numpy as np -from scipy.interpolate import interp1d, splrep, BSpline - -__all__ = ["interp1d_unc", "make_equidistant"] +from scipy.interpolate import BSpline, interp1d, splrep def interp1d_unc( @@ -46,88 +48,91 @@ def interp1d_unc( Parameters ---------- - x_new : (M,) array_like - A 1-D array of real values to evaluate the interpolant at. x_new can be - sorted in any order. - x : (N,) array_like - A 1-D array of real values. - y : (N,) array_like - A 1-D array of real values. The length of y must be equal to the length - of x. - uy : (N,) array_like - A 1-D array of real values representing the standard uncertainties - associated with y. - kind : str, optional - Specifies the kind of interpolation for y as a string ('previous', - 'next', 'nearest', 'linear' or 'cubic'). Default is ‘linear’. - copy : bool, optional - If True, the method makes internal copies of x and y. If False, - references to x and y are used. 
The default is to copy. - bounds_error : bool, optional - If True, a ValueError is raised any time interpolation is attempted on a - value outside of the range of x (where extrapolation is necessary). If - False, out of bounds values are assigned fill_value. By default, an error - is raised unless `fill_value="extrapolate"`. - fill_value : array-like or (array-like, array_like) or “extrapolate”, optional - - - if or float, this value will be used to fill in for requested points - outside of the data range. If not provided, then the default is NaN. - - If a two-element tuple, then the first element is used as a fill value - for `x_new < t[0]` and the second element is used for `x_new > t[-1]`. - Anything that is not a 2-element tuple (e.g., list or ndarray, regardless - of shape) is taken to be a single array-like argument meant to be used - for both bounds as `below, above = fill_value, fill_value`. - - If “extrapolate”, then points outside the data range will be set - to the first or last element of the values. - - If cubic-interpolation, C2-continuity at the transition to the - extrapolation-range is not guaranteed. This behavior might change - in future implementations, see issue #210 for details. - - Both parameters `fill_value` and `fill_unc` should be - provided to ensure desired behaviour in the extrapolation range. - - fill_unc : array-like or (array-like, array_like) or “extrapolate”, optional - Usage and behaviour as described in `fill_value` but for the - uncertainties. Both parameters `fill_value` and `fill_unc` should be - provided to ensure desired behaviour in the extrapolation range. - assume_sorted : bool, optional - If False, values of x can be in any order and they are sorted first. If - True, x has to be an array of monotonically increasing values. - returnC : bool, optional - If True, return sensitivity coefficients for later use. This is only - available for interpolation kind 'linear' and for - fill_unc="extrapolate" at the moment. 
If False sensitivity - coefficients are not returned and internal computation is - slightly more efficient. - - If `returnC` is False, which is the default behaviour, the method returns: + x_new : (M,) array_like + A 1-D array of real values to evaluate the interpolant at. x_new can be + sorted in any order. + x : (N,) array_like + A 1-D array of real values. + y : (N,) array_like + A 1-D array of real values. The length of y must be equal to the length + of x. + uy : (N,) array_like + A 1-D array of real values representing the standard uncertainties + associated with y. + kind : str, optional + Specifies the kind of interpolation for y as a string ('previous', + 'next', 'nearest', 'linear' or 'cubic'). Default is ‘linear’. + copy : bool, optional + If True, the method makes internal copies of x and y. If False, + references to x and y are used. The default is to copy. + bounds_error : bool, optional + If True, a ValueError is raised any time interpolation is attempted on a + value outside of the range of x (where extrapolation is necessary). If + False, out of bounds values are assigned fill_value. By default, an error + is raised unless ``fill_value="extrapolate"``. + fill_value : array-like or (array-like, array_like) or “extrapolate”, optional + - if an ndarray (or float), this value will be used to fill in for + requested points outside of the data range. If not provided, then the + default is NaN. The array-like must broadcast properly to the dimensions + of the non-interpolation axes. + - If a two-element tuple, then the first element is used as a fill value + for ``x_new < t[0]`` and the second element is used for ``x_new > t[-1]``. + Anything that is not a 2-element tuple (e.g., list or ndarray, regardless + of shape) is taken to be a single array-like argument meant to be used + for both bounds as ``below, above = fill_value, fill_value``. + - If “extrapolate”, then points outside the data range will be set + to the first or last element of the values. 
+ - If cubic-interpolation, C2-continuity at the transition to the + extrapolation-range is not guaranteed. This behavior might change + in future implementations, see issue #210 for details. + + Both parameters fill_value and fill_unc should be + provided to ensure desired behaviour in the extrapolation range. + + fill_unc : array-like or (array-like, array_like) or “extrapolate”, optional + Usage and behaviour as described in fill_value but for the + uncertainties. Both parameters fill_value and fill_unc should be + provided to ensure desired behaviour in the extrapolation range. + assume_sorted : bool, optional + If False, values of x can be in any order and they are sorted first. If + True, x has to be an array of monotonically increasing values. + returnC : bool, optional + If True, return sensitivity coefficients for later use. This is only + available for interpolation kind 'linear' and for + fill_unc="extrapolate" at the moment. If False sensitivity + coefficients are not returned and internal computation is + slightly more efficient. 
+ + + If returnC is False, which is the default behaviour, the method returns: Returns ------- - x_new : (M,) array_like - values at which the interpolant is evaluated - y_new : (M,) array_like - interpolated values - uy_new : (M,) array_like - interpolated associated standard uncertainties + x_new : (M,) array_like + values at which the interpolant is evaluated + y_new : (M,) array_like + interpolated values + uy_new : (M,) array_like + interpolated associated standard uncertainties + Otherwise the method returns: Returns ------- - x_new : (M,) array_like - values at which the interpolant is evaluated - y_new : (M,) array_like - interpolated values - uy_new : (M,) array_like - interpolated associated standard uncertainties - C : (M,N) array_like - sensitivity matrix :math:`C`, which is used to compute the uncertainties - :math:`U_{y_{new}} = C \cdot \operatorname{diag}(u_y^2) \cdot C^T` + x_new : (M,) array_like + values at which the interpolant is evaluated + y_new : (M,) array_like + interpolated values + uy_new : (M,) array_like + interpolated associated standard uncertainties + C : (M,N) array_like + sensitivity matrix :math:`C`, which is used to compute the uncertainties + :math:`U_{y_{new}} = C \cdot \operatorname{diag}(u_y^2) \cdot C^T` References ---------- - * White [White2017]_ + * White [White2017]_ """ # This is taken from the class scipy.interpolate.interp1d to copy and sort the # arrays in case that is requested and of course extended by the uncertainties. diff --git a/src/PyDynamic/uncertainty/interpolation.py b/src/PyDynamic/uncertainty/interpolation.py index 323a3f2bc..2df71876e 100644 --- a/src/PyDynamic/uncertainty/interpolation.py +++ b/src/PyDynamic/uncertainty/interpolation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ .. deprecated:: 2.0.0 The module :mod:`PyDynamic.uncertainty.interpolation` is renamed to @@ -12,8 +11,7 @@ def interp1d_unc(*args, **kwargs): """ .. 
deprecated:: 2.0.0 - Please use :func:`PyDynamic.uncertainty.interpolate.interp1d_unc` - """ + Please use :func:`PyDynamic.uncertainty.interpolate.interp1d_unc`""" raise DeprecationWarning( "The module *interpolation* is renamed to " ":mod:`PyDynamic.uncertainty.interpolate` since the last major release 2.0.0. " diff --git a/src/PyDynamic/uncertainty/propagate_DWT.py b/src/PyDynamic/uncertainty/propagate_DWT.py index 48c72bcb2..1fccb01c6 100644 --- a/src/PyDynamic/uncertainty/propagate_DWT.py +++ b/src/PyDynamic/uncertainty/propagate_DWT.py @@ -1,6 +1,5 @@ -# -*- coding: utf-8 -*- +"""This module assists in uncertainty propagation for the discrete wavelet transform -""" The :mod:`PyDynamic.uncertainty.propagate_DWT` module implements methods for the propagation of uncertainties in the application of the discrete wavelet transform (DWT). @@ -16,12 +15,6 @@ * :func:`dwt_max_level`: return the maximum achievable DWT level """ - -import numpy as np -import pywt - -from .propagate_filter import IIRuncFilter, IIR_get_initial_state - __all__ = [ "dwt", "wave_dec", @@ -32,6 +25,11 @@ "dwt_max_level", ] +import numpy as np +import pywt + +from .propagate_filter import IIRuncFilter, IIR_get_initial_state + def dwt(x, Ux, lowpass, highpass, states=None, realtime=False, subsample_start=1): """ @@ -416,30 +414,31 @@ def wave_dec_realtime(x, Ux, lowpass, highpass, n=1, level_states=None): def wave_rec(coeffs, Ucoeffs, lowpass, highpass, original_length=None): """Multilevel discrete wavelet reconstruction from levels back into time-series - Parameters: - ----------- - coeffs : list of arrays - order of arrays within list is: - [cAn, cDn, cDn-1, ..., cD2, cD1] - where: - - * cAi: approximation coefficients array from i-th level - * cDi: detail coefficients array from i-th level - Ucoeffs : list of arrays - uncertainty of coeffs, same order as coeffs - lowpass : np.ndarray - reconstruction low-pass for wavelet_block - highpass : np.ndarray - reconstruction high-pass for 
wavelet_block - original_length : int, optional (default: None) - necessary to restore correct length of original time-series + Parameters + ---------- + coeffs : list of arrays + order of arrays within list is: + [cAn, cDn, cDn-1, ..., cD2, cD1] + where: + + * cAi: approximation coefficients array from i-th level + * cDi: detail coefficients array from i-th level + + Ucoeffs : list of arrays + uncertainty of coeffs, same order as coeffs + lowpass : np.ndarray + reconstruction low-pass for wavelet_block + highpass : np.ndarray + reconstruction high-pass for wavelet_block + original_length : int, optional (default: None) + necessary to restore correct length of original time-series Returns ------- - x : np.ndarray - reconstructed signal - Ux : np.ndarray - uncertainty of reconstructed signal + x : np.ndarray + reconstructed signal + Ux : np.ndarray + uncertainty of reconstructed signal """ # init the approximation coefficients diff --git a/src/PyDynamic/uncertainty/propagate_filter.py b/src/PyDynamic/uncertainty/propagate_filter.py index c4054d2f6..2f41fc2e5 100644 --- a/src/PyDynamic/uncertainty/propagate_filter.py +++ b/src/PyDynamic/uncertainty/propagate_filter.py @@ -16,6 +16,7 @@ is known and that noise is stationary! """ +import warnings import numpy as np from scipy.linalg import solve, solve_discrete_lyapunov, toeplitz @@ -43,12 +44,12 @@ def _fir_filter(x, theta, Ux=None, Utheta=None, initial_conditions="constant"): covariance matrix associated with x if the signal is fully certain, use `Ux = None` (default) to make use of more efficient calculations. Utheta : np.ndarray, optional - covariance matrix associated with theta - if the filter is fully certain, use `Utheta = None` (default) to make use of more efficient calculations. + covariance matrix associated with theta. If the filter is fully certain, + do not provide Utheta to make use of more efficient calculations. 
see also the comparison given in initial_conditions : str, optional - constant: assume signal + uncertainty are constant before t=0 (default) - zero: assume signal + uncertainty are zero before t=0 + - "constant": assume signal + uncertainty are constant before t=0 (default) + - "zero": assume signal + uncertainty are zero before t=0 Returns @@ -110,22 +111,33 @@ def _fir_filter(x, theta, Ux=None, Utheta=None, initial_conditions="constant"): ) # calc subterm theta^T * Ux * theta - Uy += convolve(np.outer(theta, theta), Ux_extended, mode="valid") + Uy += _clip_main_diagonal_to_zero_from_below( + convolve(np.outer(theta, theta), Ux_extended, mode="valid") + ) if Utheta is not None: ## extend signal Ntheta steps into the past x_extended = np.r_[np.full((Ntheta - 1), x0), x] # calc subterm x^T * Utheta * x - Uy += convolve(np.outer(x_extended, x_extended), Utheta, mode="valid") + Uy += _clip_main_diagonal_to_zero_from_below( + convolve(np.outer(x_extended, x_extended), Utheta, mode="valid") + ) if (Ux is not None) and (Utheta is not None): # calc subterm Tr(Ux * Utheta) - Uy += convolve(Ux_extended, Utheta.T, mode="valid") + Uy += _clip_main_diagonal_to_zero_from_below( + convolve(Ux_extended, Utheta.T, mode="valid") + ) return y, Uy +def _clip_main_diagonal_to_zero_from_below(matrix: np.ndarray) -> np.ndarray: + np.fill_diagonal(matrix, matrix.diagonal().clip(min=0)) + return matrix + + def _fir_filter_diag( x, theta, Ux_diag=None, Utheta_diag=None, initial_conditions="constant" ): @@ -206,24 +218,28 @@ def _fir_filter_diag( Ux_diag_extended = np.r_[np.full((Ntheta - 1), Ux0), Ux_diag] # calc subterm theta^T * Ux * theta - Uy_diag += convolve(np.square(theta), Ux_diag_extended, mode="valid") + Uy_diag += convolve(np.square(theta), Ux_diag_extended, mode="valid").clip( + min=0 + ) if Utheta_diag is not None: ## extend signal Ntheta steps into the past x_extended = np.r_[np.full((Ntheta - 1), x0), x] # calc subterm x^T * Utheta * x - Uy_diag += 
convolve(np.square(x_extended), Utheta_diag, mode="valid") + Uy_diag += convolve(np.square(x_extended), Utheta_diag, mode="valid").clip( + min=0 + ) if (Ux_diag is not None) and (Utheta_diag is not None): # calc subterm Tr(Ux * Utheta) - Uy_diag += convolve(Ux_diag_extended, Utheta_diag, mode="valid") + Uy_diag += convolve(Ux_diag_extended, Utheta_diag, mode="valid").clip(min=0) return y, Uy_diag def _stationary_prepend_covariance(U, n): - """ Prepend covariance matrix U by n steps into the past""" + """Prepend covariance matrix U by n steps into the past""" c = np.r_[U[:, 0], np.zeros(n)] r = np.r_[U[0, :], np.zeros(n)] @@ -257,25 +273,28 @@ def FIRuncFilter( y : np.ndarray filter input signal sigma_noise : float or np.ndarray - float: standard deviation of white noise in y - 1D-array: interpretation depends on kind - 2D-array: full covariance of input + - float: standard deviation of white noise in y + - 1D-array: interpretation depends on kind + - 2D-array: full covariance of input + theta : np.ndarray FIR filter coefficients Utheta : np.ndarray, optional - 1D-array: coefficient-wise standard uncertainties of filter - 2D-array: covariance matrix associated with theta + - 1D-array: coefficient-wise standard uncertainties of filter + - 2D-array: covariance matrix associated with theta + if the filter is fully certain, use `Utheta = None` (default) to make use of more efficient calculations. 
see also the comparison given in shift : int, optional time delay of filter output signal (in samples) (defaults to 0) blow : np.ndarray, optional optional FIR low-pass filter - kind : string + kind : string, optional only meaningful in combination with sigma_noise a 1D numpy array - "diag": point-wise standard uncertainties of non-stationary white noise - "corr": single sided autocovariance of stationary (colored/correlated) - noise (default) + + - "diag": point-wise standard uncertainties of non-stationary white noise + - "corr": single sided autocovariance of stationary colored noise (default) + return_full_covariance : bool, optional whether or not to return a full covariance of the output, defaults to False @@ -284,9 +303,10 @@ def FIRuncFilter( x : np.ndarray FIR filter output signal Ux : np.ndarray - return_full_covariance == False : point-wise standard uncertainties associated with x (default) - return_full_covariance == True : covariance matrix containing uncertainties associated with x - + - return_full_covariance == False : point-wise standard uncertainties + associated with x (default) + - return_full_covariance == True : covariance matrix containing uncertainties + associated with x References ---------- @@ -356,7 +376,15 @@ def FIRuncFilter( x, Ux_diag = _fir_filter_diag( y, theta, Uy_diag, Utheta_diag, initial_conditions="constant" ) - return x, np.sqrt(np.abs(Ux_diag)) + + Ux_diag = np.sqrt(np.abs(Ux_diag)) + + # shift result + if shift != 0: + x = np.roll(x, -int(shift)) + Ux_diag = np.roll(Ux_diag, -int(shift)) + + return x, Ux_diag # otherwise, the method computes full covariance information else: @@ -404,7 +432,7 @@ def FIRuncFilter( # shift result if shift != 0: x = np.roll(x, -int(shift)) - Ux = np.roll(Ux, (-int(shift), -int(shift))) + Ux = np.roll(Ux, (-int(shift), -int(shift)), axis=(0, 1)) if return_full_covariance: return x, Ux @@ -477,15 +505,14 @@ def IIRuncFilter(x, Ux, b, a, Uab=None, state=None, kind="corr"): if not isinstance(Ux, 
np.ndarray): Ux = np.full(x.shape, Ux) # translate iid noise to vector - if kind is not "diag": + if kind != "diag": kind = "diag" - raise UserWarning( - "Ux of type float and `kind='{KIND}'` was given. To ensure the behavior " - "described in the docstring (float -> standard deviation of white noise " - " in x), `kind='diag'` is set. \n" - "To suppress this warning, explicitly set `kind='diag'`".format( - KIND=kind - ) + warnings.warn( + f"Ux of type float and `kind='{kind}'` was given. To ensure the " + f"behavior described in the docstring (float -> standard deviation of " + f"white noise in x), `kind='diag'` is set. \n(" + "To suppress this warning, explicitly set `kind='diag'`", + category=UserWarning, ) # system, corr_unc and processed_input are cached as well to reduce computational load diff --git a/test/conftest.py b/test/conftest.py index 314346c1a..177e5e349 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -3,9 +3,11 @@ import numpy as np import pytest +import scipy.stats as stats from hypothesis import assume, HealthCheck, settings, strategies as hst from hypothesis.extra import numpy as hnp from hypothesis.strategies import composite, SearchStrategy +from numpy.linalg import LinAlgError from PyDynamic import make_semiposdef from PyDynamic.misc.tools import normalize_vector_or_matrix @@ -18,7 +20,7 @@ settings.register_profile( name="ci", suppress_health_check=(HealthCheck.too_slow,), deadline=None ) -if "CIRCLECI" in os.environ: +if os.getenv("CIRCLECI") == "true": settings.load_profile("ci") @@ -57,8 +59,12 @@ def hypothesis_float_matrix( number_of_rows: Optional[int] = None, number_of_cols: Optional[int] = None, ) -> np.ndarray: - number_of_rows = draw(hypothesis_dimension(number_of_rows)) - number_of_cols = draw(hypothesis_dimension(number_of_cols)) + number_of_rows = draw( + hypothesis_dimension(min_value=number_of_rows, max_value=number_of_rows) + ) + number_of_cols = draw( + hypothesis_dimension(min_value=number_of_cols, 
max_value=number_of_cols) + ) return draw( hypothesis_float_matrix_strategy( number_of_rows=number_of_rows, number_of_cols=number_of_cols @@ -89,7 +95,7 @@ def hypothesis_nonzero_complex_vector( min_magnitude: Optional[float] = 1e-4, max_magnitude: Optional[float] = 1e4, ) -> np.ndarray: - number_of_elements = draw(hypothesis_dimension(length)) + number_of_elements = draw(hypothesis_dimension(min_value=length, max_value=length)) complex_vector = draw( hnp.arrays( dtype=complex, @@ -123,7 +129,7 @@ def hypothesis_float_vector( exclude_min: Optional[bool] = False, exclude_max: Optional[bool] = False, ) -> np.ndarray: - number_of_elements = draw(hypothesis_dimension(length)) + number_of_elements = draw(hypothesis_dimension(min_value=length, max_value=length)) return draw( hnp.arrays( dtype=float, @@ -140,8 +146,17 @@ def hypothesis_float_vector( ) -def hypothesis_not_negative_float_strategy() -> SearchStrategy: - return hst.floats(min_value=0) +def hypothesis_not_negative_float_strategy( + max_value: Optional[float] = None, + allow_nan: Optional[bool] = False, + allow_infinity: Optional[bool] = False, +) -> SearchStrategy: + return hst.floats( + min_value=0, + max_value=max_value, + allow_nan=allow_nan, + allow_infinity=allow_infinity, + ) def hypothesis_bounded_float_strategy( @@ -154,15 +169,24 @@ def hypothesis_bounded_float_strategy( return hst.floats( min_value=min_value, max_value=max_value, - include_min=exclude_min, - include_max=exclude_max, + exclude_min=exclude_min, + exclude_max=exclude_max, allow_infinity=allow_infinity, ) @composite -def hypothesis_not_negative_float(draw: Callable) -> float: - return draw(hypothesis_not_negative_float_strategy) +def hypothesis_not_negative_float( + draw: Callable, + max_value: Optional[float] = None, + allow_nan: Optional[bool] = False, + allow_infinity: Optional[bool] = False, +) -> float: + return draw( + hypothesis_not_negative_float_strategy( + max_value=max_value, allow_nan=allow_nan, allow_infinity=allow_infinity 
+ ) + ) @composite @@ -192,7 +216,9 @@ def hypothesis_covariance_matrix( min_value: Optional[float] = 0, max_value: Optional[float] = 1, ) -> np.ndarray: - number_of_rows_and_columns = draw(hypothesis_dimension(number_of_rows)) + number_of_rows_and_columns = draw( + hypothesis_dimension(min_value=number_of_rows, max_value=number_of_rows) + ) cov_with_one_eigenvalue_close_to_zero = np.cov( draw( hnp.arrays( @@ -217,7 +243,7 @@ def hypothesis_covariance_matrix( nonzero_diagonal_cov = draw( ensure_hypothesis_nonzero_diagonal(cov_after_discarding_smallest_singular_value) ) - scaled_cov = _scale_matrix_or_vector_to_range( + scaled_cov = scale_matrix_or_vector_to_range( nonzero_diagonal_cov, range_min=min_value, range_max=max_value ) assume(np.all(np.linalg.eigvals(scaled_cov) >= 0)) @@ -237,12 +263,16 @@ def ensure_hypothesis_nonzero_diagonal( ) -def _scale_matrix_or_vector_to_range( +def scale_matrix_or_vector_to_range( array: np.ndarray, range_min: Optional[float] = 0, range_max: Optional[float] = 1 ) -> np.ndarray: return normalize_vector_or_matrix(array) * (range_max - range_min) + range_min +def scale_matrix_or_vector_to_convex_combination(array: np.ndarray) -> np.ndarray: + return array / np.sum(array) + + @composite def hypothesis_covariance_matrix_with_zero_correlation( draw: Callable, number_of_rows: Optional[int] = None @@ -253,11 +283,21 @@ def hypothesis_covariance_matrix_with_zero_correlation( @composite -def hypothesis_dimension(draw: Callable, dimension: Optional[int] = None) -> int: +def hypothesis_dimension( + draw: Callable, + min_value: Optional[int] = None, + max_value: Optional[int] = None, +) -> int: + minimum_dimension = min_value if min_value is not None else 1 + maximum_dimension = max_value if max_value is not None else 20 return ( - dimension - if dimension is not None - else draw(hypothesis_reasonable_dimension_strategy()) + minimum_dimension + if minimum_dimension is not None and minimum_dimension == maximum_dimension + else draw( + 
hypothesis_reasonable_dimension_strategy( + min_value=minimum_dimension, max_value=maximum_dimension + ) + ) ) @@ -305,8 +345,12 @@ def random_covariance_matrix(length: Optional[int]) -> np.ndarray: cov_with_one_eigenvalue_close_to_zero ) cov_positive_semi_definite = cov_after_discarding_smallest_singular_value - while np.any(np.linalg.eigvals(cov_positive_semi_definite) < 0): - cov_positive_semi_definite = make_semiposdef(cov_positive_semi_definite) + while True: + try: + stats.multivariate_normal(cov=cov_positive_semi_definite) + break + except LinAlgError: + cov_positive_semi_definite = make_semiposdef(cov_positive_semi_definite) return cov_positive_semi_definite diff --git a/test/reference_arrays/test_LSFIR_H.npz b/test/reference_arrays/test_LSFIR_H.npz new file mode 100644 index 000000000..962a50bf6 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_H.npz differ diff --git a/test/reference_arrays/test_LSFIR_HbF.npz b/test/reference_arrays/test_LSFIR_HbF.npz new file mode 100644 index 000000000..39b6c85fe Binary files /dev/null and b/test/reference_arrays/test_LSFIR_HbF.npz differ diff --git a/test/reference_arrays/test_LSFIR_Hf.npz b/test/reference_arrays/test_LSFIR_Hf.npz new file mode 100644 index 000000000..583cf5e5c Binary files /dev/null and b/test/reference_arrays/test_LSFIR_Hf.npz differ diff --git a/test/reference_arrays/test_LSFIR_UH.npz b/test/reference_arrays/test_LSFIR_UH.npz new file mode 100644 index 000000000..9229d819a Binary files /dev/null and b/test/reference_arrays/test_LSFIR_UH.npz differ diff --git a/test/reference_arrays/test_LSFIR_UbF.npz b/test/reference_arrays/test_LSFIR_UbF.npz new file mode 100644 index 000000000..902dcad01 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_UbF.npz differ diff --git a/test/reference_arrays/test_LSFIR_Uxhat.npz b/test/reference_arrays/test_LSFIR_Uxhat.npz new file mode 100644 index 000000000..8867d23c9 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_Uxhat.npz 
differ diff --git a/test/reference_arrays/test_LSFIR_bF.npz b/test/reference_arrays/test_LSFIR_bF.npz new file mode 100644 index 000000000..a38bd7f85 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_bF.npz differ diff --git a/test/reference_arrays/test_LSFIR_blow.npz b/test/reference_arrays/test_LSFIR_blow.npz new file mode 100644 index 000000000..f8c0c4cbe Binary files /dev/null and b/test/reference_arrays/test_LSFIR_blow.npz differ diff --git a/test/reference_arrays/test_LSFIR_shift.npz b/test/reference_arrays/test_LSFIR_shift.npz new file mode 100644 index 000000000..f67a4f0a8 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_shift.npz differ diff --git a/test/reference_arrays/test_LSFIR_uAbs.npz b/test/reference_arrays/test_LSFIR_uAbs.npz new file mode 100644 index 000000000..98b8d8057 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_uAbs.npz differ diff --git a/test/reference_arrays/test_LSFIR_uPhas.npz b/test/reference_arrays/test_LSFIR_uPhas.npz new file mode 100644 index 000000000..4284d8288 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_uPhas.npz differ diff --git a/test/reference_arrays/test_LSFIR_x.npz b/test/reference_arrays/test_LSFIR_x.npz new file mode 100644 index 000000000..6beadc1d3 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_x.npz differ diff --git a/test/reference_arrays/test_LSFIR_xhat.npz b/test/reference_arrays/test_LSFIR_xhat.npz new file mode 100644 index 000000000..231993849 Binary files /dev/null and b/test/reference_arrays/test_LSFIR_xhat.npz differ diff --git a/test/test_DFT.py b/test/test_DFT.py index 159210fed..c5e4c004a 100644 --- a/test/test_DFT.py +++ b/test/test_DFT.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Perform tests on methods to handle DFT and inverse DFT.""" from typing import Callable, Dict, Optional, Tuple, Union @@ -10,6 +9,7 @@ from numpy.testing import assert_allclose, assert_almost_equal from PyDynamic.misc.testsignals import multi_sine + # 
noinspection PyProtectedMember from PyDynamic.uncertainty.propagate_DFT import ( _apply_window, @@ -21,9 +21,9 @@ ) from .conftest import ( check_no_nans_and_infs, - hypothesis_float_vector, hypothesis_float_matrix, hypothesis_float_square_matrix_strategy, + hypothesis_float_vector, hypothesis_not_negative_float_strategy, VectorAndCompatibleMatrix, ) @@ -186,6 +186,7 @@ def test_AmpPhasePropagation(self, multisine_testsignal): assert_almost_equal(np.max(np.abs(testsignal - x)), 0) +@pytest.mark.slow def test_compose_DFT_and_iDFT_with_full_covariance(multisine_testsignal, corrmatrix): """Test GUM_DFT and GUM_iDFT with full covariance matrix""" x, ux = multisine_testsignal diff --git a/test/test_DFT_deconv.py b/test/test_DFT_deconv.py index 94ac1c2a1..a1c9865b6 100644 --- a/test/test_DFT_deconv.py +++ b/test/test_DFT_deconv.py @@ -190,6 +190,7 @@ def _compute_covariance_of_multivariate_monte_carlo_samples( @given(deconvolution_input(reveal_bug=True)) @settings(deadline=None) +@pytest.mark.slow def test_reveal_bug_in_dft_deconv_up_to_1_9( multivariate_complex_monte_carlo, complex_deconvolution_on_sets, DFT_deconv_input ): diff --git a/test/test_LSFIR.py b/test/test_LSFIR.py new file mode 100644 index 000000000..7dae850ae --- /dev/null +++ b/test/test_LSFIR.py @@ -0,0 +1,1190 @@ +import os +import pathlib +from typing import Callable, cast, Optional, Union + +import hypothesis.strategies as hst +import numpy as np +import pytest +import scipy.signal as dsp +from hypothesis import given, HealthCheck, settings +from hypothesis.strategies import composite +from matplotlib import pyplot as plt +from numpy.testing import assert_allclose, assert_almost_equal + +from PyDynamic.misc.filterstuff import kaiser_lowpass +from PyDynamic.misc.SecondOrderSystem import sos_phys2filter +from PyDynamic.misc.tools import make_semiposdef + +# noinspection PyProtectedMember +from PyDynamic.model_estimation.fit_filter import ( + _assemble_complex_from_real_imag, + 
_assemble_real_imag_from_complex, + invLSFIR, + invLSFIR_unc, + invLSFIR_uncMC, + LSFIR, +) +from PyDynamic.uncertainty.propagate_filter import FIRuncFilter +from .conftest import ( + hypothesis_dimension, + hypothesis_float_vector, + scale_matrix_or_vector_to_convex_combination, +) + + +@pytest.fixture(scope="module") +def random_number_generator(): + return np.random.default_rng(1) + + +@pytest.fixture(scope="module") +def measurement_system(): + f0 = 36e3 + S0 = 0.4 + delta = 0.01 + return {"f0": f0, "S0": S0, "delta": delta} + + +@composite +def weights( + draw: Callable, guarantee_vector: Optional[bool] = False +) -> Union[np.ndarray, None]: + valid_vector_strategy = hypothesis_float_vector( + length=400, min_value=0, max_value=1, exclude_min=True + ) + valid_weight_strategies = ( + valid_vector_strategy + if guarantee_vector + else (valid_vector_strategy, hst.just(None)) + ) + unscaled_weights = draw(hst.one_of(valid_weight_strategies)) + if unscaled_weights is not None: + return scale_matrix_or_vector_to_convex_combination(unscaled_weights) + + +@pytest.fixture(scope="module") +def sampling_freq(): + return 500e3 + + +@pytest.fixture(scope="module") +def freqs(): + return np.linspace(0, 120e3, 200) + + +@pytest.fixture(scope="module") +def monte_carlo( + measurement_system, random_number_generator, sampling_freq, freqs, complex_freq_resp +): + udelta = 0.1 * measurement_system["delta"] + uS0 = 0.001 * measurement_system["S0"] + uf0 = 0.01 * measurement_system["f0"] + + runs = 10000 + MCS0 = ( + measurement_system["S0"] + random_number_generator.standard_normal(runs) * uS0 + ) + MCd = ( + measurement_system["delta"] + + random_number_generator.standard_normal(runs) * udelta + ) + MCf0 = ( + measurement_system["f0"] + random_number_generator.standard_normal(runs) * uf0 + ) + HMC = np.zeros((runs, len(freqs)), dtype=complex) + for k in range(runs): + bc_, ac_ = sos_phys2filter(MCS0[k], MCd[k], MCf0[k]) + b_, a_ = dsp.bilinear(bc_, ac_, sampling_freq) + HMC[k, 
:] = dsp.freqz(b_, a_, 2 * np.pi * freqs / sampling_freq)[1] + + H = _assemble_real_imag_from_complex(complex_freq_resp) + assert_allclose( + H, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_H.npz", + ), + )["H"], + ) + uAbs = np.std(np.abs(HMC), axis=0) + assert_allclose( + uAbs, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_uAbs.npz", + ), + )["uAbs"], + rtol=3.5e-2, + ) + uPhas = np.std(np.angle(HMC), axis=0) + assert_allclose( + uPhas, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_uPhas.npz", + ), + )["uPhas"], + rtol=4.3e-2, + ) + UH = np.cov(np.hstack((np.real(HMC), np.imag(HMC))), rowvar=False) + UH = make_semiposdef(UH) + assert_allclose( + UH, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_UH.npz", + ), + )["UH"], + atol=1, + ) + return {"H": H, "uAbs": uAbs, "uPhas": uPhas, "UH": UH} + + +@pytest.fixture(scope="module") +def complex_H_with_UH(monte_carlo): + return { + "H": _assemble_complex_from_real_imag(monte_carlo["H"]), + "UH": monte_carlo["UH"], + } + + +@pytest.fixture(scope="module") +def digital_filter(measurement_system, sampling_freq): + # transform continuous system to digital filter + bc, ac = sos_phys2filter( + measurement_system["S0"], measurement_system["delta"], measurement_system["f0"] + ) + assert_almost_equal(bc, [20465611686.098896]) + assert_allclose(ac, np.array([1.00000000e00, 4.52389342e03, 5.11640292e10])) + b, a = dsp.bilinear(bc, ac, sampling_freq) + assert_allclose( + b, np.array([0.019386043211510096, 0.03877208642302019, 0.019386043211510096]) + ) + assert_allclose(a, np.array([1.0, -1.7975690550957188, 0.9914294872108197])) + return {"b": b, "a": a} + + +@pytest.fixture(scope="module") +def complex_freq_resp(measurement_system, sampling_freq, freqs, digital_filter): + Hf = dsp.freqz( + 
digital_filter["b"], + digital_filter["a"], + 2 * np.pi * freqs / sampling_freq, + )[1] + assert_allclose( + Hf, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_Hf.npz", + ), + )["Hf"], + ) + return Hf + + +@pytest.fixture(scope="module") +def simulated_measurement_input_and_output( + sampling_freq, digital_filter, random_number_generator +): + Ts = 1 / sampling_freq + time = np.arange(0, 4e-3 - Ts, Ts) + # x = shocklikeGaussian(time, t0 = 2e-3, sigma = 1e-5, m0=0.8) + m0 = 0.8 + sigma = 1e-5 + t0 = 2e-3 + x = ( + -m0 + * (time - t0) + / sigma + * np.exp(0.5) + * np.exp(-((time - t0) ** 2) / (2 * sigma ** 2)) + ) + assert_allclose( + x, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_x.npz", + ), + )["x"], + ) + y = dsp.lfilter(digital_filter["b"], digital_filter["a"], x) + noise = 1e-3 + yn = y + random_number_generator.standard_normal(np.size(y)) * noise + + return {"time": time, "x": x, "yn": yn, "noise": noise} + + +@pytest.fixture(scope="module") +def LSFIR_filter_fit(monte_carlo, freqs, sampling_freq): + N = 12 + tau = N // 2 + bF, UbF = LSFIR( + monte_carlo["H"], N, freqs, sampling_freq, tau, inv=True, UH=monte_carlo["UH"] + ) + assert np.all(np.linalg.eigvals(UbF) >= 0) + assert_allclose( + bF, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_bF.npz", + ), + )["bF"], + ) + assert_allclose( + UbF, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_UbF.npz", + ), + )["UbF"], + rtol=3e-1, + ) + return {"bF": bF, "UbF": UbF, "N": N, "tau": tau} + + +@pytest.fixture(scope="module") +def fir_low_pass(measurement_system, sampling_freq): + fcut = measurement_system["f0"] + 10e3 + low_order = 100 + blow, lshift = kaiser_lowpass(low_order, fcut, sampling_freq) + assert_allclose( + blow, + np.load( + os.path.join( + 
pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_blow.npz", + ), + )["blow"], + ) + return {"blow": blow, "lshift": lshift} + + +@pytest.fixture(scope="module") +def shift(simulated_measurement_input_and_output, LSFIR_filter_fit, fir_low_pass): + shift = (len(LSFIR_filter_fit["bF"]) - 1) // 2 + fir_low_pass["lshift"] + assert_allclose( + shift, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_shift.npz", + ), + )["shift"], + ) + return shift + + +@pytest.fixture(scope="module") +def fir_unc_filter( + shift, + simulated_measurement_input_and_output, + LSFIR_filter_fit, + fir_low_pass, +): + xhat, Uxhat = FIRuncFilter( + simulated_measurement_input_and_output["yn"], + simulated_measurement_input_and_output["noise"], + LSFIR_filter_fit["bF"], + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_UbF.npz", + ), + )["UbF"], + shift, + fir_low_pass["blow"], + ) + return {"xhat": xhat, "Uxhat": Uxhat} + + +def test_digital_deconvolution_FIR_example_figure_1( + freqs, complex_freq_resp, monte_carlo +): + plt.figure(figsize=(16, 8)) + plt.errorbar( + freqs * 1e-3, + np.abs(complex_freq_resp), + monte_carlo["uAbs"], + fmt=".", + ) + plt.title("measured amplitude spectrum with associated uncertainties") + plt.xlim(0, 50) + plt.xlabel("frequency / kHz", fontsize=20) + plt.ylabel("magnitude / au", fontsize=20) + # plt.show() + + +def test_digital_deconvolution_FIR_example_figure_2( + freqs, complex_freq_resp, monte_carlo +): + plt.figure(figsize=(16, 8)) + plt.errorbar( + freqs * 1e-3, + np.angle(complex_freq_resp), + monte_carlo["uPhas"], + fmt=".", + ) + plt.title("measured phase spectrum with associated uncertainties") + plt.xlim(0, 50) + plt.xlabel("frequency / kHz", fontsize=20) + plt.ylabel("phase / rad", fontsize=20) + # plt.show() + + +def test_digital_deconvolution_FIR_example_figure_3( + simulated_measurement_input_and_output, +): + 
plt.figure(figsize=(16, 8)) + plt.plot( + simulated_measurement_input_and_output["time"] * 1e3, + simulated_measurement_input_and_output["x"], + label="system input signal", + ) + plt.plot( + simulated_measurement_input_and_output["time"] * 1e3, + simulated_measurement_input_and_output["yn"], + label="measured output " "signal", + ) + plt.legend(fontsize=20) + plt.xlim(1.8, 4) + plt.ylim(-1, 1) + plt.xlabel("time / ms", fontsize=20) + plt.ylabel(r"signal amplitude / $m/s^2$", fontsize=20) + # plt.show() + + +def test_digital_deconvolution_FIR_example_figure_4( + LSFIR_filter_fit, monte_carlo, freqs, sampling_freq +): + plt.figure(figsize=(16, 8)) + plt.errorbar( + range(len(LSFIR_filter_fit["bF"])), + LSFIR_filter_fit["bF"], + np.sqrt(np.diag(LSFIR_filter_fit["UbF"])), + fmt="o", + ) + plt.xlabel("FIR coefficient index", fontsize=20) + plt.ylabel("FIR coefficient value", fontsize=20) + # plt.show() + + +def test_digital_deconvolution_FIR_example_figure_5( + LSFIR_filter_fit, freqs, sampling_freq, complex_freq_resp, fir_low_pass +): + plt.figure(figsize=(16, 10)) + HbF = ( + dsp.freqz( + LSFIR_filter_fit["bF"], + 1, + 2 * np.pi * freqs / sampling_freq, + )[1] + * dsp.freqz(fir_low_pass["blow"], 1, 2 * np.pi * freqs / sampling_freq)[1] + ) + assert_allclose( + HbF, + np.load( + os.path.join( + pathlib.Path(__file__).parent.resolve(), + "reference_arrays", + "test_LSFIR_HbF.npz", + ), + )["HbF"], + ) + plt.semilogy( + freqs * 1e-3, + np.abs(complex_freq_resp), + label="measured frequency response", + ) + plt.semilogy(freqs * 1e-3, np.abs(HbF), label="inverse filter") + plt.semilogy( + freqs * 1e-3, + np.abs(complex_freq_resp * HbF), + label="compensation result", + ) + plt.legend() + # plt.show() + + +def test_digital_deconvolution_FIR_example_figure_6( + simulated_measurement_input_and_output, LSFIR_filter_fit, fir_unc_filter +): + plt.figure(figsize=(16, 8)) + plt.plot( + simulated_measurement_input_and_output["time"] * 1e3, + 
simulated_measurement_input_and_output["x"], + label="input signal", + ) + plt.plot( + simulated_measurement_input_and_output["time"] * 1e3, + simulated_measurement_input_and_output["yn"], + label="output signal", + ) + plt.plot( + simulated_measurement_input_and_output["time"] * 1e3, + fir_unc_filter["xhat"], + label="estimate of input", + ) + plt.legend(fontsize=20) + plt.xlabel("time / ms", fontsize=22) + plt.ylabel("signal amplitude / au", fontsize=22) + plt.tick_params(which="both", labelsize=16) + plt.xlim(1.9, 2.4) + plt.ylim(-1, 1) + + +def test_digital_deconvolution_FIR_example_figure_7( + simulated_measurement_input_and_output, + fir_unc_filter, +): + plt.figure(figsize=(16, 10)) + plt.plot( + simulated_measurement_input_and_output["time"] * 1e3, + fir_unc_filter["Uxhat"], + ) + plt.xlabel("time / ms") + plt.ylabel("signal uncertainty / au") + plt.subplots_adjust(left=0.15, right=0.95) + plt.tick_params(which="both", labelsize=16) + plt.xlim(1.9, 2.4) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_compare_LSFIR_with_zero_to_None_uncertainties_with_svd_for_fitting_one_over_H( + monte_carlo, freqs, sampling_freq, filter_order +): + b_fir_svd = LSFIR( + H=monte_carlo["H"], + UH=np.zeros_like(monte_carlo["UH"]), + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=True, + )[0] + b_fir_none = LSFIR( + H=monte_carlo["H"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=True, + UH=None, + )[0] + assert_allclose(b_fir_svd, b_fir_none) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_compare_LSFIR_with_zero_to_None_uncertainties_and_mc_for_fitting_one_over_H( + 
monte_carlo, freqs, sampling_freq, filter_order +): + b_fir_mc = LSFIR( + H=monte_carlo["H"], + UH=np.zeros_like(monte_carlo["UH"]), + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=True, + mc_runs=2, + )[0] + b_fir_none = LSFIR( + H=monte_carlo["H"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=True, + UH=None, + )[0] + assert_allclose(b_fir_mc, b_fir_none) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_compare_LSFIR_with_zero_to_None_uncertainties_and_mc_for_fitting_H_directly( + monte_carlo, freqs, sampling_freq, filter_order +): + b_fir_mc = LSFIR( + H=monte_carlo["H"], + UH=np.zeros_like(monte_carlo["UH"]), + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=False, + mc_runs=2, + )[0] + b_fir_none = LSFIR( + H=monte_carlo["H"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=False, + UH=None, + )[0] + assert_allclose(b_fir_mc, b_fir_none) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_usual_call_LSFIR_for_fitting_H_directly_with_svd( + monte_carlo, freqs, sampling_freq, filter_order +): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=monte_carlo["UH"], + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12), hst.booleans()) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +def test_usual_call_LSFIR_with_None_uncertainties( + monte_carlo, freqs, sampling_freq, filter_order, fit_reciprocal +): + LSFIR( + H=monte_carlo["H"], + 
N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=fit_reciprocal, + UH=None, + ) + + +@given( + hypothesis_dimension(min_value=2, max_value=12), + weights(), + hst.booleans(), + hst.booleans(), +) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.function_scoped_fixture, + ], +) +@pytest.mark.slow +def test_usual_call_LSFIR_with_mc( + capsys, monte_carlo, freqs, sampling_freq, filter_order, weight_vector, verbose, inv +): + with capsys.disabled(): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=weight_vector, + verbose=verbose, + inv=inv, + UH=monte_carlo["UH"], + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings(deadline=None) +def test_LSFIR_with_too_short_H(monte_carlo, freqs, sampling_freq, filter_order): + too_short_H = monte_carlo["H"][1:] + with pytest.raises( + ValueError, + match=r"LSFIR: vector of complex frequency responses is expected to " + r"contain [0-9]+ elements, corresponding to the number of frequencies.*", + ): + LSFIR( + H=too_short_H, + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=monte_carlo["UH"], + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings(deadline=None) +def test_LSFIR_with_complex_but_too_short_H( + complex_H_with_UH, freqs, sampling_freq, filter_order +): + complex_h_but_too_short = complex_H_with_UH["H"][1:] + with pytest.raises( + ValueError, + match=r"LSFIR: vector of complex frequency responses is expected to " + r"contain [0-9]+ elements, corresponding to the number of frequencies.*", + ): + LSFIR( + H=complex_h_but_too_short, + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=complex_H_with_UH["UH"], + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings(deadline=None) +def 
test_LSFIR_with_too_short_f(monte_carlo, freqs, sampling_freq, filter_order): + too_short_f = freqs[1:] + with pytest.raises( + ValueError, + match=r"LSFIR: vector of complex frequency responses is expected to " + r"contain [0-9]+ elements, corresponding to the number of frequencies.*", + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=too_short_f, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=monte_carlo["UH"], + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.function_scoped_fixture, + ], +) +def test_LSFIR_with_too_short_UH(monte_carlo, freqs, sampling_freq, filter_order): + too_few_rows_UH = monte_carlo["UH"][1:] + with pytest.raises( + ValueError, + match=r"LSFIR: number of rows of uncertainties and number of " + r"elements of values are expected to match\..*", + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=too_few_rows_UH, + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.function_scoped_fixture, + ], +) +def test_LSFIR_with_nonsquare_UH(monte_carlo, freqs, sampling_freq, filter_order): + too_few_columns_UH = monte_carlo["UH"][:, 1:] + with pytest.raises( + ValueError, + match=r"LSFIR: uncertainties are expected to be " + r"provided in a square matrix shape.*", + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=too_few_columns_UH, + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.function_scoped_fixture, + ], +) +def test_LSFIR_with_wrong_type_UH(monte_carlo, freqs, 
sampling_freq, filter_order): + uh_list = monte_carlo["UH"].tolist() + with pytest.raises( + TypeError, + match=r"LSFIR: if uncertainties are provided, " + r"they are expected to be of type np\.ndarray.*", + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=cast(np.ndarray, uh_list), + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=4, max_value=8)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + HealthCheck.function_scoped_fixture, + ], +) +@pytest.mark.slow +def test_compare_different_dtypes_LSFIR( + capsys, + monte_carlo, + complex_H_with_UH, + freqs, + sampling_freq, + filter_order, +): + with capsys.disabled(): + b_real_imaginary, ub_real_imaginary = LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + verbose=True, + UH=monte_carlo["UH"], + mc_runs=10000, + ) + b_complex, ub_complex = LSFIR( + H=complex_H_with_UH["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + verbose=True, + UH=monte_carlo["UH"], + mc_runs=10000, + ) + assert_allclose(b_real_imaginary, b_complex, rtol=4e-2) + assert_allclose(ub_real_imaginary, ub_complex, rtol=6e-1) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings(deadline=None) +def test_LSFIR_with_wrong_type_weights(monte_carlo, freqs, sampling_freq, filter_order): + weight_list = [1] * 2 * len(freqs) + with pytest.raises( + TypeError, match=r"LSFIR: User-defined weighting has wrong type.*" + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=cast(np.ndarray, weight_list), + inv=True, + UH=monte_carlo["UH"], + mc_runs=2, + ) + + +@given(weights(guarantee_vector=True), hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + 
*settings.default.suppress_health_check, + ], +) +@pytest.mark.slow +def test_LSFIR_with_wrong_len_weights( + monte_carlo, freqs, sampling_freq, weight_vector, filter_order +): + weight_vector = weight_vector[1:] + with pytest.raises( + ValueError, + match=r"LSFIR: User-defined weighting has wrong dimension.*", + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=weight_vector, + inv=True, + UH=monte_carlo["UH"], + mc_runs=2, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_not_implemented_LSFIR(monte_carlo, freqs, sampling_freq, filter_order): + expected_error_msg_regex = ( + r"LSFIR: The least-squares fitting of a digital FIR filter " + r".*truncated singular-value decomposition and linear matrix propagation.*is " + r"not yet implemented.*" + ) + with pytest.raises( + NotImplementedError, + match=expected_error_msg_regex, + ): + LSFIR( + H=monte_carlo["H"], + UH=monte_carlo["UH"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=False, + ) + with pytest.raises( + NotImplementedError, + match=expected_error_msg_regex, + ): + LSFIR( + H=monte_carlo["H"], + UH=monte_carlo["UH"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=False, + trunc_svd_tol=0.0, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_missing_mc_uncertainties_LSFIR( + monte_carlo, freqs, sampling_freq, filter_order +): + with pytest.raises( + ValueError, + match=r"LSFIR: The least-squares fitting of a digital FIR filter " + r".*Monte Carlo.*requires that uncertainties are provided via input " + r"parameter UH.*", + ): + LSFIR( + 
H=monte_carlo["H"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=False, + mc_runs=1, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_missing_svd_uncertainties_LSFIR( + monte_carlo, freqs, sampling_freq, filter_order +): + with pytest.raises( + ValueError, + match=r"LSFIR: The least-squares fitting of a digital FIR filter " + r".*singular-value decomposition and linear matrix propagation.*requires that " + r"uncertainties are provided via input parameter UH.*", + ): + LSFIR( + H=monte_carlo["H"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=True, + trunc_svd_tol=0.0, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_both_propagation_methods_simultaneously_requested_uncertainties_LSFIR( + monte_carlo, freqs, sampling_freq, filter_order +): + with pytest.raises( + ValueError, + match=r"LSFIR: Only one of mc_runs and trunc_svd_tol can be " r"provided but.*", + ): + LSFIR( + H=monte_carlo["H"], + UH=monte_carlo["UH"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=False, + mc_runs=2, + trunc_svd_tol=0.0, + ) + + +@given(hypothesis_dimension(min_value=2, max_value=12), hst.booleans()) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_too_small_number_of_monte_carlo_runs_LSFIR( + monte_carlo, freqs, sampling_freq, filter_order, inv +): + with pytest.raises( + ValueError, + match=r"LSFIR: Number of Monte Carlo runs is expected to be greater " + r"than 1.*", + ): + LSFIR( + H=monte_carlo["H"], + UH=monte_carlo["UH"], + 
N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + inv=inv, + mc_runs=1, + ) + + +@given(weights(), hypothesis_dimension(min_value=4, max_value=8)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + HealthCheck.function_scoped_fixture, + ], + max_examples=10, +) +@pytest.mark.slow +def test_compare_LSFIR_with_svd_and_with_mc( + capsys, monte_carlo, freqs, sampling_freq, weight_vector, filter_order +): + with capsys.disabled(): + b_fir_svd = LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=weight_vector, + verbose=True, + inv=True, + UH=monte_carlo["UH"], + )[0] + b_fir_mc = LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=weight_vector, + verbose=True, + inv=True, + UH=monte_carlo["UH"], + mc_runs=10000, + )[0] + assert_allclose(b_fir_mc, b_fir_svd, rtol=9e-2) + + +@given(hypothesis_dimension(min_value=4, max_value=8)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + HealthCheck.function_scoped_fixture, + ], +) +@pytest.mark.slow +def test_compare_invLSFIR_uncMC_LSFIR( + capsys, monte_carlo, freqs, sampling_freq, filter_order +): + with capsys.disabled(): + b_fir_mc, Ub_fir_mc = invLSFIR_uncMC( + H=monte_carlo["H"], + UH=monte_carlo["UH"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + ) + b_fir, Ub_fir = LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + inv=True, + UH=monte_carlo["UH"], + mc_runs=10000, + ) + assert_allclose(b_fir_mc, b_fir, rtol=4e-2) + assert_allclose(Ub_fir_mc, Ub_fir, atol=6e-1, rtol=6e-1) + + +@given(hypothesis_dimension(min_value=4, max_value=8), weights()) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + 
HealthCheck.too_slow, + HealthCheck.function_scoped_fixture, + ], +) +@pytest.mark.slow +def test_compare_invLSFIR_unc_LSFIR_only_by_filter_coefficients( + capsys, monte_carlo, freqs, sampling_freq, filter_order, weight_vector +): + with capsys.disabled(): + b_fir_mc = invLSFIR_unc( + H=monte_carlo["H"], + UH=monte_carlo["UH"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + wt=weight_vector, + )[0] + b_fir = LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=weight_vector, + inv=True, + UH=monte_carlo["UH"], + )[0] + assert_allclose(b_fir_mc, b_fir) + + +@given(hypothesis_dimension(min_value=2, max_value=12), weights()) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.too_slow, + ], +) +@pytest.mark.slow +def test_compare_invLSFIR_to_LSFIR( + monte_carlo, freqs, sampling_freq, filter_order, weight_vector +): + b_fir_inv_lsfir = invLSFIR( + H=monte_carlo["H"], + N=filter_order, + tau=filter_order // 2, + f=freqs, + Fs=sampling_freq, + Wt=weight_vector, + ) + b_fir = LSFIR( + H=monte_carlo["H"], + N=filter_order, + f=freqs, + Fs=sampling_freq, + tau=filter_order // 2, + weights=weight_vector, + inv=True, + )[0] + assert_allclose(b_fir, b_fir_inv_lsfir) diff --git a/test/test_LSIIR.py b/test/test_LSIIR.py new file mode 100644 index 000000000..a8c72fc21 --- /dev/null +++ b/test/test_LSIIR.py @@ -0,0 +1,340 @@ +"""Perform tests on identification part of package model_estimation.""" +from collections import namedtuple +from datetime import timedelta +from typing import Dict + +import numpy as np +import pytest +from hypothesis import assume, given, settings, strategies as hst +from numpy.testing import assert_almost_equal, assert_equal + +from PyDynamic import grpdelay, isstable, mapinside, sos_FreqResp +from PyDynamic.model_estimation import fit_filter + + +@hst.composite +def LSIIR_parameters(draw): + """Design a 
sample measurement system and a corresponding frequency response.""" + # Set the maximum absolute value for floats to be really unique in calculations. + float_generic_params = {"allow_nan": False, "allow_infinity": False} + # measurement system + f0 = draw( + hst.floats(min_value=1e2, max_value=1e6, **float_generic_params) + ) # originally this was set to 36e3 for the system resonance frequency in Hz + + S0 = draw( + hst.floats(min_value=0, max_value=1, **float_generic_params) + ) # originally this was set to 0.124 for the system static gain + + delta = draw( + hst.floats(min_value=1e-5, max_value=1e-1, **float_generic_params) + ) # originally this was set to 0.0055 for the system damping + dim = draw( + hst.integers(min_value=1, max_value=60) + ) # originally this was set to 30 for the number of frequencies + maximum_frequency = draw( + hst.floats(min_value=1e2, max_value=1e6, **float_generic_params) + ) # originally this was set to 80e3 for the system damping + f = np.linspace(0, maximum_frequency, dim) # frequencies for fitting the system + H = sos_FreqResp(S0, delta, f0, f) # frequency response of the 2nd order system + + Fs = draw( + hst.floats(min_value=1e5, max_value=5e6, **float_generic_params) + ) # originally this was set to 500e3 for the sampling frequency + Na = draw( + hst.integers(min_value=1, max_value=10) + ) # originally this was set to 4 for the IIR denominator filter order + Nb = draw( + hst.integers(min_value=1, max_value=10) + ) # originally this was set to 4 for the IIR numerator filter order + return {"H": H, "Na": Na, "Nb": Nb, "f": f, "Fs": Fs} + + +@pytest.fixture(scope="module") +def compute_fitting_parameters(): + """Compute the parameters needed to calculate an IIR model least-square fit + + This provides omega and E for the least-squares fits based on provided params. 
+ """ + + def _compute_fitting_parameters(LSIIR_params: Dict[str, np.ndarray]): + """Compute the parameters needed to calculate an IIR model least-square fit""" + omega = 2 * np.pi * LSIIR_params["f"] / LSIIR_params["Fs"] + Ns = np.arange(0, max(LSIIR_params["Nb"], LSIIR_params["Na"]) + 1)[ + :, np.newaxis + ] + E = np.exp(-1j * np.dot(omega[:, np.newaxis], Ns.T)) + + return {"omega": omega, "E": E} + + return _compute_fitting_parameters + + +@pytest.fixture(scope="module") +def provide_fitted_filter(): + """This provides a IIR least-squares filter fit to a frequency response""" + + def _return_fitted_filter(ls_base_parameters, compute_fitting_parameters): + """This provides a IIR least-squares filter fit to a frequency response""" + Filter = namedtuple("Filter", ["b", "a"]) + + b, a = fit_filter._compute_actual_iir_least_squares_fit( + H=ls_base_parameters["H"], + tau=0, + **compute_fitting_parameters(ls_base_parameters), + Na=ls_base_parameters["Na"], + Nb=ls_base_parameters["Nb"], + inv=False, + ) + return Filter(b=b, a=a) + + return _return_fitted_filter + + +@pytest.fixture(scope="module") +def provide_former_fitIIR(): + """This is the fixture providing the former implementation of _fitIIR""" + + def _former_fitIIR( + _H: np.ndarray, + _tau: int, + _omega: np.ndarray, + _E: np.ndarray, + _Na: int, + _Nb: int, + _inv: bool = False, + ): + """The actual fitting routing for the least-squares IIR filter. 
+ + Parameters + ---------- + _H : (M,) np.ndarray + (complex) frequency response values + _tau : integer + initial estimate of time delay + _omega : np.ndarray + :math:`2 * np.pi * f / Fs` + _E : np.ndarray + :math:`np.exp(-1j * np.dot(w[:, np.newaxis], Ns.T))` + _Nb : int + numerator polynomial order + _Na : int + denominator polynomial order + _inv : bool, optional + If True the least-squares fitting is performed for the reciprocal, + if False + for the actual frequency response + + Returns + ------- + b, a : IIR filter coefficients as numpy arrays + """ + exponent = -1 if _inv else 1 + Ea = _E[:, 1 : _Na + 1] + Eb = _E[:, : _Nb + 1] + Htau = np.exp(-1j * _omega * _tau) * _H ** exponent + HEa = np.dot(np.diag(Htau), Ea) + D = np.hstack((HEa, -Eb)) + Tmp1 = np.real(np.dot(np.conj(D.T), D)) + Tmp2 = np.real(np.dot(np.conj(D.T), -Htau)) + ab = np.linalg.lstsq(Tmp1, Tmp2, rcond=None)[0] + a_coeff = np.hstack((1.0, ab[:_Na])) + b_coeff = ab[_Na:] + return b_coeff, a_coeff + + return _former_fitIIR + + +@pytest.fixture(scope="module") +def former_LSIIR(): + def _former_LSIIR(_former_fitIIR, H, Nb, Na, f, Fs, tau=0, justFit=False): + """LSIIR method before version 2.0.0 + + This helps to assure that the rewritten version matches the results of the + previous implementation. We only commented out all statements, that did not + contribute to the actual computation. This is the state in which the + implementation was in Commit d2ac33ef4d5425de5bd1989d24fe9c11908f2aa0. 
+ + Parameters + ---------- + H: numpy array of (complex) frequency response values of shape (M,) + Nb: integer numerator polynomial order + Na: integer denominator polynomial order + f: numpy array of frequencies at which H is given of shape + (M,) + Fs: sampling frequency + tau: integer initial estimate of time delay + justFit: boolean, when true then no stabilization is carried out + + Returns + ------- + b,a: IIR filter coefficients as numpy arrays + tau: filter time delay in samples + + References + ---------- + * Eichstädt et al. 2010 [Eichst2010]_ + * Vuerinckx et al. 1996 [Vuer1996]_ + + """ + + # print("\nLeast-squares fit of an order %d digital IIR filter" % max(Nb, Na)) + # print("to a frequency response given by %d values.\n" % len(H)) + + w = 2 * np.pi * f / Fs + Ns = np.arange(0, max(Nb, Na) + 1)[:, np.newaxis] + E = np.exp(-1j * np.dot(w[:, np.newaxis], Ns.T)) + + b, a = _former_fitIIR(H, tau, w, E, Na, Nb, _inv=False) + + if justFit: + # print("Calculation done. No stabilization requested.") + # if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: + # print("Obtained filter is NOT stable.") + # sos = np.sum( + # np.abs((dsp.freqz(b, a, 2 * np.pi * f / Fs)[1] - H) ** 2)) + # print("Final sum of squares = %e" % sos) + tau = 0 + return b, a, tau + + if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: + stable = False + else: + stable = True + + maxiter = 50 + + astab = mapinside(a) + run = 1 + + while stable is not True and run < maxiter: + g1 = grpdelay(b, a, Fs)[0] + g2 = grpdelay(b, astab, Fs)[0] + tau = np.ceil(tau + np.median(g2 - g1)) + + b, a = _former_fitIIR(H, tau, w, E, Na, Nb, _inv=False) + if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: + astab = mapinside(a) + else: + stable = True + run = run + 1 + + # if np.count_nonzero(np.abs(np.roots(a)) > 1) > 0: + # print("Caution: The algorithm did NOT result in a stable IIR filter!") + # print( + # "Maybe try again with a higher value of tau0 or a higher filter " + # "order?") + + # print( + # "Least 
squares fit finished after %d iterations (tau=%d)." % (run, tau)) + # Hd = dsp.freqz(b, a, 2 * np.pi * f / Fs)[1] + # Hd = Hd * np.exp(1j * 2 * np.pi * f / Fs * tau) + # res = np.hstack((np.real(Hd) - np.real(H), np.imag(Hd) - np.imag(H))) + # rms = np.sqrt(np.sum(res ** 2) / len(f)) + # print("Final rms error = %e \n\n" % rms) + + return b, a, int(tau) + + return _former_LSIIR + + +@settings(deadline=None) +@given(LSIIR_parameters()) +def test_LSIIR_outputs_format(parameters): + """This checks against expected formats of the outputs.""" + b, a, tau, _ = fit_filter.LSIIR(**parameters) + + assert_equal(len(b), parameters["Nb"] + 1) + assert_equal(len(a), parameters["Na"] + 1) + assert isinstance(tau, np.integer) + assert tau >= 0 + + +@given(lsiir_base_params=LSIIR_parameters()) +def test_isstable_results_against_former_implementations( + lsiir_base_params, provide_fitted_filter, compute_fitting_parameters +): + """This takes the implementation prior to the rewrite and compares results. + + The original implementation was a check against the exact statement we put into + this test as seen here: + https://github.com/PTB-PSt1/PyDynamic/blob/00c19662333d23c580a9f60750e60021712d8393/PyDynamic/model_estimation/fit_filter.py#L138 + """ + fitted_filter = provide_fitted_filter(lsiir_base_params, compute_fitting_parameters) + assert not ( + np.count_nonzero(np.abs(np.roots(fitted_filter.a)) > 1) > 0 + ) == isstable(fitted_filter.b, fitted_filter.a, ftype="digital") + + +@settings(deadline=timedelta(milliseconds=400)) +@given( + lsiir_base_params=LSIIR_parameters(), + tau=hst.integers(min_value=0, max_value=100), + inv=hst.booleans(), +) +def test_fitIIR_results_against_former_implementations( + lsiir_base_params, tau, inv, compute_fitting_parameters, provide_former_fitIIR +): + """This takes the implementation prior to the rewrite and compares results.""" + if inv: + # Make sure there are non-zero frequency responses. 
Otherwise fitting to + # reciprocal of frequency response means dividing by zero. + assume(not np.all(lsiir_base_params["H"] == 0)) + + # Initialize parameters. + fit_params = { + "H": lsiir_base_params["H"], + "tau": tau, + **compute_fitting_parameters(LSIIR_params=lsiir_base_params), + "Na": lsiir_base_params["Na"], + "Nb": lsiir_base_params["Nb"], + "inv": inv, + } + + # Compute solution of current version. + b_current, a_current = fit_filter._compute_actual_iir_least_squares_fit( + **fit_params + ) + + # Rename parameter dict keys to the same name with leading underscore for the old + # version. + fit_params = {f"_{key}": value for key, value in fit_params.items()} + + # Compute solution of former version. + b_former, a_former = provide_former_fitIIR(**fit_params) + + assert_almost_equal(b_current, b_former) + assert_almost_equal(a_current, a_former) + + +@settings(deadline=timedelta(milliseconds=1000)) +@given(lsiir_base_params=LSIIR_parameters()) +def test_LSIIR_results_against_former_implementations( + lsiir_base_params, provide_former_fitIIR, former_LSIIR +): + """This takes the implementation prior to the rewrite and compares results.""" + b_current, a_current, tau_current, _ = fit_filter.LSIIR(**lsiir_base_params) + b_former, a_former, tau_former = former_LSIIR( + provide_former_fitIIR, **lsiir_base_params + ) + + assert_almost_equal(b_current, b_former) + assert_almost_equal(a_current, a_former) + assert_almost_equal(tau_current, tau_former) + + +@given(lsiir_base_params=LSIIR_parameters()) +def test_fit_iir_via_least_squares_exception( + lsiir_base_params, provide_fitted_filter, compute_fitting_parameters +): + if np.any(lsiir_base_params["H"] != 0): + lsiir_base_params["H"] = np.zeros_like(lsiir_base_params["H"], dtype=complex) + with pytest.raises(ValueError): + fit_filter._compute_actual_iir_least_squares_fit( + H=lsiir_base_params["H"], + tau=0, + **compute_fitting_parameters(lsiir_base_params), + Na=lsiir_base_params["Na"], + 
Nb=lsiir_base_params["Nb"], + inv=True, + ) diff --git a/test/test_fit_som.py b/test/test_fit_som.py index 5909c3ab0..09154eb8c 100644 --- a/test/test_fit_som.py +++ b/test/test_fit_som.py @@ -105,16 +105,15 @@ def test_fit_som_with_too_short_UH(params): @given(random_input_to_fit_som(guarantee_UH_as_matrix=True)) -def test_fit_som_with_unsquare_UH(params): +def test_fit_som_with_nonsquare_UH(params): params["UH"] = params["UH"][:, 1:] with pytest.raises(ValueError): fit_som(**params) -@given(random_input_to_fit_som()) +@given(random_input_to_fit_som(guarantee_UH_as_matrix=True)) def test_fit_som_with_nonint_MCruns(params): params["MCruns"] = float(params["MCruns"]) - assume(params["UH"] is not None) with pytest.raises(ValueError): fit_som(**params) @@ -134,6 +133,7 @@ def test_fit_som_with_too_short_weighting_vector(params): @given(random_input_to_fit_som()) +@pytest.mark.slow def test_fit_som_with_zero_frequency_response_but_without_MCruns_or_UH(params): params["H"][0] = 0.0 assume(params["MCruns"] is None or params["UH"] is None) diff --git a/test/test_identification.py b/test/test_identification.py deleted file mode 100644 index 7ac2680f5..000000000 --- a/test/test_identification.py +++ /dev/null @@ -1,28 +0,0 @@ -""" Perform tests on identification sub-packages.""" - -import numpy as np - -from PyDynamic.model_estimation import fit_filter -from PyDynamic.misc.SecondOrderSystem import sos_FreqResp - - -def test_LSIIR(): - # measurement system - f0 = 36e3 # system resonance frequency in Hz - S0 = 0.124 # system static gain - delta = 0.0055 # system damping - - f = np.linspace(0, 80e3, 30) # frequencies for fitting the system - Hvals = sos_FreqResp(S0, delta, f0, f) # frequency response of the 2nd order system - - # %% fitting the IIR filter - - Fs = 500e3 # sampling frequency - Na = 4 - Nb = 4 # IIR filter order (Na - denominator, Nb - numerator) - - b, a, tau = fit_filter.LSIIR(Hvals, Na, Nb, f, Fs) - - assert len(b) == Nb + 1 - assert len(a) == Na + 1 - assert 
isinstance(tau, int) diff --git a/test/test_interpolate.py b/test/test_interpolate.py index 891b2246e..d1c424eb2 100644 --- a/test/test_interpolate.py +++ b/test/test_interpolate.py @@ -1,7 +1,7 @@ from typing import Dict, Optional, Tuple, Union import hypothesis.extra.numpy as hnp -import hypothesis.strategies as st +import hypothesis.strategies as hst import numpy as np import pytest from hypothesis import assume, given @@ -94,10 +94,10 @@ def draw_fill_values(strategy_spec: str): ------- The drawn sample to match desired fill_value. """ - float_strategy = st.floats(**float_generic_params) - tuple_strategy = st.tuples(float_strategy, float_strategy) - string_strategy = st.just("extrapolate") - nan_strategy = st.just(np.nan) + float_strategy = hst.floats(**float_generic_params) + tuple_strategy = hst.tuples(float_strategy, float_strategy) + string_strategy = hst.just("extrapolate") + nan_strategy = hst.just(np.nan) if strategy_spec == "float": fill_strategy = float_strategy elif strategy_spec == "tuple": @@ -107,7 +107,7 @@ def draw_fill_values(strategy_spec: str): elif strategy_spec == "nan": fill_strategy = nan_strategy else: - fill_strategy = st.one_of( + fill_strategy = hst.one_of( float_strategy, tuple_strategy, string_strategy, nan_strategy ) return draw(fill_strategy) @@ -124,7 +124,7 @@ def draw_fill_values(strategy_spec: str): strategy_params = { "dtype": np.float, "shape": shape_for_x, - "elements": st.floats( + "elements": hst.floats( min_value=-float_abs_max, max_value=float_abs_max, **float_generic_params ), "unique": True, @@ -143,11 +143,11 @@ def draw_fill_values(strategy_spec: str): uy = draw(hnp.arrays(**strategy_params)) # Draw the interpolation kind from the provided tuple. 
- kind = draw(st.sampled_from(kind_tuple)) + kind = draw(hst.sampled_from(kind_tuple)) if for_make_equidistant: dx = draw( - st.floats( + hst.floats( min_value=(np.max(x) - np.min(x)) * 1e-3, max_value=(np.max(x) - np.min(x)) / 2, exclude_min=True, @@ -166,13 +166,13 @@ def draw_fill_values(strategy_spec: str): if not extrapolate: # In case we do not want to extrapolate, use range of "original" # x values as boundaries. - strategy_params["elements"] = st.floats( + strategy_params["elements"] = hst.floats( min_value=x_min, max_value=x_max, **float_generic_params ) fill_value = fill_unc = np.nan # Switch between default value None and intentionally setting to True, # which should behave identically. - bounds_error = draw(st.one_of(st.just(True), st.none())) + bounds_error = draw(hst.one_of(hst.just(True), hst.none())) else: # In case we want to extrapolate, draw some fill values for the # out-of-bounds range. Those will be either single floats or a 2-tuple of @@ -585,7 +585,7 @@ def test_value_error_for_returnc_interp1d_unc(interp_inputs): interp1d_unc(**interp_inputs) -@given(st.integers(min_value=3, max_value=1000)) +@given(hst.integers(min_value=3, max_value=1000)) def test_linear_uy_in_interp1d_unc( n, ): @@ -688,7 +688,7 @@ def test_linear_in_make_equidistant(interp_inputs): assert np.all(np.amax(interp_inputs["y"]) >= y_new) -@given(st.integers(min_value=3, max_value=1000)) +@given(hst.integers(min_value=3, max_value=1000)) @pytest.mark.slow def test_linear_uy_in_make_equidistant(n): # Check for given input, if interpolated uncertainties equal 1 and diff --git a/test/test_propagate_DWT.py b/test/test_propagate_DWT.py index 5aeef86c5..e3f51e95e 100644 --- a/test/test_propagate_DWT.py +++ b/test/test_propagate_DWT.py @@ -4,6 +4,7 @@ import numpy as np import pywt +from numpy.testing import assert_allclose from PyDynamic.uncertainty.propagate_DWT import ( dwt, @@ -56,8 +57,8 @@ def test_dwt(): ca, cd = pywt.dwt(x, filter_name, mode="constant") assert ca.size == 
y1.size assert cd.size == y2.size - assert np.allclose(ca, y1) - assert np.allclose(cd, y2) + assert_allclose(ca, y1, atol=1e-15) + assert_allclose(cd, y2, atol=1e-15) def test_inv_dwt(): @@ -85,7 +86,7 @@ def test_inv_dwt(): # compare to pywt r = pywt.idwt(c_approx, c_detail, filter_name, mode="constant") - assert np.allclose(x, r) + assert_allclose(x, r) def test_identity_single(): @@ -109,11 +110,11 @@ def test_identity_single(): if x.size % 2 == 0: assert x.size == xr.size assert Ux.size == Uxr.size - assert np.allclose(x, xr) + assert_allclose(x, xr) else: assert x.size + 1 == xr.size assert Ux.size + 1 == Uxr.size - assert np.allclose(x, xr[:-1]) + assert_allclose(x, xr[:-1]) def test_max_level(): @@ -143,7 +144,7 @@ def test_wave_dec(): # compare output in detail for a, b in zip(result_pywt, coeffs): assert len(a) == len(b) - assert np.allclose(a, b) + assert_allclose(a, b, atol=1e-15) def test_decomposition_realtime(): @@ -194,12 +195,12 @@ def test_decomposition_realtime(): # compare output in detail for a, b in zip(coeffs_a, coeffs_b): assert len(a) == len(b) - assert np.allclose(a, b) + assert_allclose(a, b) # compare output uncertainty in detail for a, b in zip(Ucoeffs_a, Ucoeffs_b): assert len(a) == len(b) - assert np.allclose(a, b) + assert_allclose(a, b) def test_wave_rec(): @@ -228,7 +229,7 @@ def test_wave_rec(): # compare output of both methods assert len(result_pywt) == len(x) - assert np.allclose(result_pywt, x) + assert_allclose(result_pywt, x) def test_identity_multi(): @@ -251,5 +252,5 @@ def test_identity_multi(): xr, Uxr = wave_rec(coeffs, Ucoeffs, lr, hr, original_length=ol) assert x.size == xr.size - assert np.allclose(x, xr) + assert_allclose(x, xr) assert Ux.size == Uxr.size diff --git a/test/test_propagate_MonteCarlo.py b/test/test_propagate_MonteCarlo.py index c1c396923..6f53f475e 100644 --- a/test/test_propagate_MonteCarlo.py +++ b/test/test_propagate_MonteCarlo.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import numpy as np 
import pytest +from numpy.testing import assert_allclose from PyDynamic.misc.filterstuff import kaiser_lowpass from PyDynamic.misc.noise import ARMA @@ -66,6 +67,11 @@ def test_MC(visualizeOutput=False): plt.show() +def test_MC_non_negative_main_diagonal_covariance(): + _, Uy = MC(x, sigma_noise, b1, np.ones(1), Ub, runs=runs, blow=b2) + assert np.all(np.diag(Uy) >= 0) + + def test_SMC(): # run method y, Uy = SMC(x, sigma_noise, b1, np.ones(1), Ub, runs=runs) @@ -167,8 +173,8 @@ def test_compare_MC_UMC(): ) # both methods should yield roughly the same results - assert np.allclose(y_MC, y_UMC, atol=5e-4) - assert np.allclose(Uy_MC, Uy_UMC, atol=5e-4) + assert_allclose(y_MC, y_UMC, atol=5e-4) + assert_allclose(Uy_MC, Uy_UMC, atol=5e-4) def test_noise_ARMA(): diff --git a/test/test_propagate_filter.py b/test/test_propagate_filter.py index 7b814dced..aaf0f938d 100644 --- a/test/test_propagate_filter.py +++ b/test/test_propagate_filter.py @@ -1,14 +1,18 @@ """Perform test for uncertainty.propagate_filter""" import itertools +from typing import Any, Callable, Dict, Tuple import numpy as np import pytest import scipy +from hypothesis import given, HealthCheck, settings, strategies as hst +from hypothesis.strategies import composite +from numpy.testing import assert_allclose, assert_equal from scipy.linalg import toeplitz from scipy.signal import lfilter, lfilter_zi from PyDynamic.misc.testsignals import rect -from PyDynamic.misc.tools import trimOrPad +from PyDynamic.misc.tools import shift_uncertainty, trimOrPad # noinspection PyProtectedMember from PyDynamic.uncertainty.propagate_filter import ( @@ -19,59 +23,88 @@ IIRuncFilter, ) from PyDynamic.uncertainty.propagate_MonteCarlo import MC -from .conftest import random_covariance_matrix - - -def random_array(length): - array = np.random.randn(length) - return array - - -def random_nonnegative_array(length): - array = np.random.random(length) - return array - - -def random_rightsided_autocorrelation(length): - array = 
random_array(length) - acf = scipy.signal.correlate(array, array, mode="full") - return acf[len(acf) // 2 :] +from .conftest import ( + hypothesis_covariance_matrix, + hypothesis_float_vector, + hypothesis_not_negative_float, + random_covariance_matrix, + scale_matrix_or_vector_to_range, +) -def valid_filters(): - N = np.random.randint(2, 100) # scipy.linalg.companion requires N >= 2 - theta = random_array(N) +@composite +def FIRuncFilter_input( + draw: Callable, exclude_corr_kind: bool = False +) -> Dict[str, Any]: + filter_length = draw( + hst.integers(min_value=2, max_value=100) + ) # scipy.linalg.companion requires N >= 2 + filter_theta = draw( + hypothesis_float_vector(length=filter_length, min_value=1e-2, max_value=1e3) + ) + filter_theta_covariance = draw( + hst.one_of( + hypothesis_covariance_matrix(number_of_rows=filter_length), hst.just(None) + ) + ) - return [ - {"theta": theta, "Utheta": None}, - {"theta": theta, "Utheta": np.zeros((N, N))}, - {"theta": theta, "Utheta": random_covariance_matrix(N)}, - ] + signal_length = draw(hst.integers(min_value=200, max_value=1000)) + signal = draw( + hypothesis_float_vector(length=signal_length, min_value=1e-2, max_value=1e3) + ) + if exclude_corr_kind: + kind = draw(hst.sampled_from(("float", "diag"))) + else: + kind = draw(hst.sampled_from(("float", "diag", "corr"))) + if kind == "diag": + signal_standard_deviation = draw( + hypothesis_float_vector(length=signal_length, min_value=0, max_value=1e2) + ) + elif kind == "corr": + random_data = draw( + hypothesis_float_vector( + length=signal_length // 2, min_value=1e-2, max_value=1e2 + ) + ) + acf = scipy.signal.correlate(random_data, random_data, mode="full") -def valid_signals(): - N = np.random.randint(100, 1000) - signal = random_array(N) + scaled_acf = scale_matrix_or_vector_to_range( + acf, range_min=1e-10, range_max=1e3 + ) + signal_standard_deviation = scaled_acf[len(scaled_acf) // 2 :] + else: + signal_standard_deviation = 
draw(hypothesis_not_negative_float(max_value=1e2)) + + lowpass_length = draw( + hst.integers(min_value=2, max_value=10) + ) # scipy.linalg.companion requires N >= 2 + lowpass = draw( + hst.one_of( + ( + hypothesis_float_vector( + length=lowpass_length, min_value=1e-2, max_value=1e2 + ), + hst.just(None), + ) + ) + ) - return [ - {"y": signal, "sigma_noise": np.random.randn(), "kind": "float"}, - {"y": signal, "sigma_noise": random_nonnegative_array(N), "kind": "diag"}, - { - "y": signal, - "sigma_noise": random_rightsided_autocorrelation(N // 2), - "kind": "corr", - }, - ] + shift = draw(hypothesis_not_negative_float(max_value=1e2)) + return { + "theta": filter_theta, + "Utheta": filter_theta_covariance, + "y": signal, + "sigma_noise": signal_standard_deviation, + "kind": kind, + "blow": lowpass, + "shift": shift, + } -def valid_lows(): - N = np.random.randint(2, 10) # scipy.linalg.companion requires N >= 2 - blow = random_array(N) - return [ - {"blow": None}, - {"blow": blow}, - ] +def random_array(length): + return np.random.randn(length) @pytest.fixture @@ -110,15 +143,14 @@ def equal_signals(): return equal_signals -@pytest.mark.parametrize("filters", valid_filters()) -@pytest.mark.parametrize("signals", valid_signals()) -@pytest.mark.parametrize("lowpasses", valid_lows()) +@given(FIRuncFilter_input()) +@settings(deadline=None) @pytest.mark.slow -def test_FIRuncFilter(filters, signals, lowpasses): +def test_FIRuncFilter(fir_unc_filter_input): # Check expected output for thinkable permutations of input parameters. 
- y, Uy = FIRuncFilter(**filters, **signals, **lowpasses) - assert len(y) == len(signals["y"]) - assert len(Uy) == len(signals["y"]) + y, Uy = FIRuncFilter(**fir_unc_filter_input) + assert len(y) == len(fir_unc_filter_input["y"]) + assert len(Uy) == len(fir_unc_filter_input["y"]) # note: a direct comparison against scipy.signal.lfilter is not needed, # as y is already computed using this method @@ -363,96 +395,199 @@ def test_FIRuncFilter_equality(equal_filters, equal_signals): # check that all have the same output, as they are supposed to represent equal cases for a, b in itertools.combinations(all_y, 2): - assert np.allclose(a, b) + assert_allclose(a, b) for a, b in itertools.combinations(all_uy, 2): - assert np.allclose(a, b) + assert_allclose(a, b) + + +@given(FIRuncFilter_input()) +@settings(deadline=None) +@pytest.mark.slow +def test_FIRuncFilter_for_correct_output_dimensions_for_full_covariance( + fir_unc_filter_input, +): + y_fir, Uy_fir = FIRuncFilter(**fir_unc_filter_input, return_full_covariance=True) + assert_equal(len(fir_unc_filter_input["y"]), len(y_fir)) + assert_equal(Uy_fir.shape, (len(y_fir), len(y_fir))) + +@given(FIRuncFilter_input()) +@settings(deadline=None) +@pytest.mark.slow +def test_FIRuncFilter_for_correct_output_dimensions_for_vector_covariance( + fir_unc_filter_input, +): + _, Uy = FIRuncFilter(**fir_unc_filter_input) + assert_equal(Uy.shape, (len(fir_unc_filter_input["y"]),)) + + +@given(FIRuncFilter_input()) +@settings(deadline=None) +@pytest.mark.slow +def test_FIRuncFilter_for_correct_dimension_of_y(fir_unc_filter_input): + y_fir = FIRuncFilter(**fir_unc_filter_input)[0] + assert_equal(len(fir_unc_filter_input["y"]), len(y_fir)) -# in the following test, we exclude the case of a valid signal with uncertainty given as + +# in the following test, we exclude the case of a valid signal with uncertainty +# given as # the right-sided auto-covariance (acf). 
This is done, because we currently do not # ensure, that the random-drawn acf generates a positive-semidefinite -# Toeplitz-matrix. Therefore we cannot construct a valid and equivalent input for the +# Toeplitz-matrix. Therefore we cannot construct a valid and equivalent input for +# the # Monte-Carlo method in that case. -@pytest.mark.parametrize("filters", valid_filters()) -@pytest.mark.parametrize("signals", valid_signals()[:2]) # exclude kind="corr" -@pytest.mark.parametrize("lowpasses", valid_lows()) +@given(FIRuncFilter_input(exclude_corr_kind=True)) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.function_scoped_fixture, + HealthCheck.too_slow, + ], + max_examples=10, +) @pytest.mark.slow -def test_FIRuncFilter_MC_uncertainty_comparison(filters, signals, lowpasses): +def test_FIRuncFilter_MC_uncertainty_comparison(capsys, fir_unc_filter_input): # Check output for thinkable permutations of input parameters against a Monte Carlo # approach. 
# run method - y_fir, Uy_fir = FIRuncFilter( - **filters, **signals, **lowpasses, return_full_covariance=True - ) + y_fir, Uy_fir = FIRuncFilter(**fir_unc_filter_input, return_full_covariance=True) # run Monte Carlo simulation of an FIR # adjust input to match conventions of MC - x = signals["y"] - ux = signals["sigma_noise"] + x = fir_unc_filter_input["y"] + ux = fir_unc_filter_input["sigma_noise"] - b = filters["theta"] - a = [1.0] - if isinstance(filters["Utheta"], np.ndarray): - Uab = filters["Utheta"] + b = fir_unc_filter_input["theta"] + a = np.ones(1) + if isinstance(fir_unc_filter_input["Utheta"], np.ndarray): + Uab = fir_unc_filter_input["Utheta"] else: # Utheta == None Uab = np.zeros((len(b), len(b))) # MC-method cant deal with Utheta = None - blow = lowpasses["blow"] + blow = fir_unc_filter_input["blow"] if isinstance(blow, np.ndarray): n_blow = len(blow) else: n_blow = 0 # run FIR with MC and extract diagonal of returned covariance - y_mc, Uy_mc = MC(x, ux, b, a, Uab, blow=blow, runs=2000) + with capsys.disabled(): + y_mc, Uy_mc = MC( + x, + ux, + b, + a, + Uab, + blow=blow, + runs=2000, + shift=-fir_unc_filter_input["shift"], + verbose=True, + ) + + # approximate comparison after swing-in of MC-result (which is after the combined + # length of blow and b) + swing_in_length = len(b) + n_blow + relevant_y_fir, relevant_Uy_fir = _set_irrelevant_ranges_to_zero( + signal=y_fir, + uncertainties=Uy_fir, + swing_in_length=swing_in_length, + shift=fir_unc_filter_input["shift"], + ) + relevant_y_mc, relevant_Uy_mc = _set_irrelevant_ranges_to_zero( + signal=y_mc, + uncertainties=Uy_mc, + swing_in_length=swing_in_length, + shift=fir_unc_filter_input["shift"], + ) # HACK for visualization during debugging - # import matplotlib.pyplot as plt - # fig, ax = plt.subplots(nrows=1, ncols=3) - # ax[0].plot(y_fir, label="fir") - # ax[0].plot(y_mc, label="mc") - # ax[0].set_title("filter: {0}, signal: {1}".format(len(b), len(x))) - # ax[0].legend() - # ax[1].imshow(Uy_fir) 
- # ax[1].set_title("FIR") - # ax[2].imshow(Uy_mc) - # ax[2].set_title("MC") - # plt.show() + # from PyDynamic.misc.tools import plot_vectors_and_covariances_comparison + # + # plot_vectors_and_covariances_comparison( + # vector_1=relevant_y_fir, + # vector_2=relevant_y_mc, + # covariance_1=relevant_Uy_fir, + # covariance_2=relevant_Uy_mc, + # label_1="fir", + # label_2="mc", + # title=f"filter length: {len(b)}, signal length: {len(x)}, blow: " + # f"{fir_unc_filter_input['blow']}", + # ) # /HACK + assert_allclose( + relevant_y_fir, + relevant_y_mc, + atol=np.max((np.max(np.abs(y_fir)), 2e-1)), + ) + assert_allclose( + relevant_Uy_fir, + relevant_Uy_mc, + atol=np.max((np.max(Uy_fir), 1e-7)), + ) - # check basic properties - assert np.all(np.diag(Uy_fir) >= 0) - assert np.all(np.diag(Uy_mc) >= 0) - assert Uy_fir.shape == Uy_mc.shape - # approximate comparison after swing-in of MC-result (which is after the combined - # length of blow and b) - assert np.allclose( - Uy_fir[len(b) + n_blow :, len(b) + n_blow :], - Uy_mc[len(b) + n_blow :, len(b) + n_blow :], - atol=2e-1 * Uy_fir.max(), # very broad check, increase runs for better fit - rtol=1e-1, +def _set_irrelevant_ranges_to_zero( + signal: np.ndarray, uncertainties: np.ndarray, swing_in_length: int, shift: float +) -> Tuple[np.ndarray, np.ndarray]: + relevant_signal_comparison_range_after_swing_in = np.zeros_like(signal, dtype=bool) + relevant_uncertainty_comparison_range_after_swing_in = np.zeros_like( + uncertainties, dtype=bool ) + relevant_signal_comparison_range_after_swing_in[swing_in_length:] = 1 + relevant_uncertainty_comparison_range_after_swing_in[ + swing_in_length:, swing_in_length: + ] = 1 + ( + shifted_relevant_signal_comparison_range_after_swing_in, + shifted_relevant_uncertainty_comparison_range_after_swing_in, + ) = shift_uncertainty( + relevant_signal_comparison_range_after_swing_in, + relevant_uncertainty_comparison_range_after_swing_in, + -int(shift), + ) + 
signal[np.logical_not(shifted_relevant_signal_comparison_range_after_swing_in)] = 0 + uncertainties[ + np.logical_not(shifted_relevant_uncertainty_comparison_range_after_swing_in) + ] = 0 + return signal, uncertainties -@pytest.mark.parametrize("filters", valid_filters()) -@pytest.mark.parametrize("signals", valid_signals()) -@pytest.mark.parametrize("lowpasses", valid_lows()) +@given(FIRuncFilter_input()) +@settings(deadline=None) @pytest.mark.slow -def test_FIRuncFilter_legacy_comparison(filters, signals, lowpasses): - # Compare output of both functions for thinkable permutations of input parameters. - y, Uy = legacy_FIRuncFilter(**filters, **signals, **lowpasses) - y2, Uy2 = FIRuncFilter(**filters, **signals, **lowpasses) +def test_FIRuncFilter_non_negative_main_diagonal_covariance(fir_unc_filter_input): + _, Uy_fir = FIRuncFilter(**fir_unc_filter_input, return_full_covariance=True) + assert np.all(np.diag(Uy_fir) >= 0) - # check output dimensions - assert len(y2) == len(signals["y"]) - assert Uy2.shape == (len(signals["y"]),) - # check value identity - assert np.allclose(y, y2) - assert np.allclose(Uy, Uy2) +@given(FIRuncFilter_input()) +@settings( + deadline=None, + suppress_health_check=[ + *settings.default.suppress_health_check, + HealthCheck.function_scoped_fixture, + ], +) +@pytest.mark.slow +def test_FIRuncFilter_legacy_comparison(capsys, fir_unc_filter_input): + legacy_y, legacy_Uy = legacy_FIRuncFilter(**fir_unc_filter_input) + with capsys.disabled(): + current_y, current_Uy = FIRuncFilter(**fir_unc_filter_input) + + assert_allclose( + legacy_y, + current_y, + atol=1e-15, + ) + assert_allclose( + legacy_Uy, + current_Uy, + atol=np.max((np.max(current_Uy) * 1e-7, 1e-7)), + rtol=3e-6, + ) @pytest.mark.slow @@ -469,27 +604,22 @@ def test_fir_filter_MC_comparison(): y_fir, Uy_fir = _fir_filter(x, theta, Ux, Utheta, initial_conditions="zero") # run FIR with MC and extract diagonal of returned covariance - y_mc, Uy_mc = MC(x, Ux, theta, [1.0], Utheta, 
blow=None, runs=10000) + y_mc, Uy_mc = MC(x, Ux, theta, np.ones(1), Utheta, blow=None, runs=10000) # HACK: for visualization during debugging - # import matplotlib.pyplot as plt - # fig, ax = plt.subplots(nrows=1, ncols=3) - # ax[0].plot(y_fir, label="fir") - # ax[0].plot(y_mc, label="mc") - # ax[0].set_title("filter: {0}, signal: {1}".format(len(theta), len(x))) - # ax[0].legend() - # ax[1].imshow(Uy_fir) - # ax[1].set_title("FIR") - # ax[2].imshow(Uy_mc) - # ax[2].set_title("MC") - # plt.show() + # from PyDynamic.misc.tools import plot_vectors_and_covariances_comparison + # plot_vectors_and_covariances_comparison( + # vector_1=y_fir, + # vector_2=y_mc, + # covariance_1=Uy_fir, + # covariance_2=Uy_mc, + # label_1="fir", + # label_2="mc", + # title=f"filter: {len(theta)}, signal: {len(x)}", + # ) # /HACK - - # approximate comparison - assert np.all(np.diag(Uy_fir) >= 0) - assert np.all(np.diag(Uy_mc) >= 0) - assert Uy_fir.shape == Uy_mc.shape - assert np.allclose(Uy_fir, Uy_mc, atol=1e-1, rtol=1e-1) + assert_allclose(y_fir, y_mc, atol=1e-1, rtol=1e-1) + assert_allclose(Uy_fir, Uy_mc, atol=1e-1, rtol=1e-1) def test_IIRuncFilter(): @@ -523,7 +653,12 @@ def fir_filter(): @pytest.fixture(scope="module") -def input_signal(): +def sigma_noise(): + return 1e-2 # std for input signal + + +@pytest.fixture(scope="module") +def input_signal(sigma_noise): # simulate input and output signals Fs = 100e3 # sampling frequency (in Hz) @@ -532,7 +667,6 @@ def input_signal(): time = np.arange(nx) * Ts # time values # input signal + run methods - sigma_noise = 1e-2 # std for input signal x = rect(time, 100 * Ts, 250 * Ts, 1.0, noise=sigma_noise) # generate input signal Ux = sigma_noise * np.ones_like(x) # uncertainty of input signal @@ -596,8 +730,8 @@ def test_IIRuncFilter_identity_nonchunk_chunk( y2, Uy2 = run_IIRuncFilter_in_chunks # check if both ways of calling IIRuncFilter yield the same result - assert np.allclose(y1, y2) - assert np.allclose(Uy1, Uy2) + assert_allclose(y1, y2) 
+ assert_allclose(Uy1, Uy2) @pytest.mark.parametrize("kind", ["diag", "corr"]) @@ -612,8 +746,8 @@ def test_FIR_IIR_identity(kind, fir_filter, input_signal): kind=kind, ) - assert np.allclose(y_fir, y_iir) - assert np.allclose(Uy_fir, Uy_iir) + assert_allclose(y_fir, y_iir) + assert_allclose(Uy_fir, Uy_iir) def test_tf2ss(iir_filter): @@ -623,10 +757,10 @@ def test_tf2ss(iir_filter): A1, B1, C1, D1 = _tf2ss(b, a) A2, B2, C2, D2 = scipy.signal.tf2ss(b, a) - assert np.allclose(A1, A2[::-1, ::-1]) - assert np.allclose(B1, B2[::-1, ::-1]) - assert np.allclose(C1, C2[::-1, ::-1]) - assert np.allclose(D1, D2[::-1, ::-1]) + assert_allclose(A1, A2[::-1, ::-1]) + assert_allclose(B1, B2[::-1, ::-1]) + assert_allclose(C1, C2[::-1, ::-1]) + assert_allclose(D1, D2[::-1, ::-1]) def test_get_derivative_A(): @@ -639,4 +773,12 @@ def test_get_derivative_A(): sliced_diagonal = np.full(p, -1.0) - assert np.allclose(dA[index1, index2, index3], sliced_diagonal) + assert_allclose(dA[index1, index2, index3], sliced_diagonal) + + +def test_IIRuncFilter_raises_warning_for_kind_not_diag_with_scalar_covariance( + sigma_noise, iir_filter, input_signal +): + input_signal["Ux"] = sigma_noise + with pytest.warns(UserWarning): + IIRuncFilter(**input_signal, **iir_filter, kind="corr")