diff --git a/.appveyor/after_test.bat b/.appveyor/after_test.bat
deleted file mode 100644
index c3ea1212..00000000
--- a/.appveyor/after_test.bat
+++ /dev/null
@@ -1,6 +0,0 @@
-IF DEFINED CYBUILD (
-    %BUILD% python setup.py bdist_wheel
-    IF "%APPVEYOR_REPO_TAG%"=="true" (
-        twine upload -u %PYPI_USERNAME% -p %PYPI_PASSWORD% dist\*.whl
-    )
-)
\ No newline at end of file
diff --git a/.appveyor/build.cmd b/.appveyor/build.cmd
deleted file mode 100644
index 75ac0733..00000000
--- a/.appveyor/build.cmd
+++ /dev/null
@@ -1,21 +0,0 @@
-@echo off
-:: To build extensions for 64 bit Python 3, we need to configure environment
-:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
-:: MS Windows SDK for Windows 7 and .NET Framework 4
-::
-:: More details at:
-:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows
-
-IF "%DISTUTILS_USE_SDK%"=="1" (
-    ECHO Configuring environment to build with MSVC on a 64bit architecture
-    ECHO Using Windows SDK 7.1
-    "C:\Program Files\Microsoft SDKs\Windows\v7.1\Setup\WindowsSdkVer.exe" -q -version:v7.1
-    CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64 /release
-    SET MSSdk=1
-    REM Need the following to allow tox to see the SDK compiler
-    SET TOX_TESTENV_PASSENV=DISTUTILS_USE_SDK MSSdk INCLUDE LIB
-) ELSE (
-    ECHO Using default MSVC build environment
-)
-
-CALL %*
\ No newline at end of file
diff --git a/.appveyor/prepare.bat b/.appveyor/prepare.bat
deleted file mode 100644
index 1fcec189..00000000
--- a/.appveyor/prepare.bat
+++ /dev/null
@@ -1,24 +0,0 @@
-pip install -U wheel setuptools || goto :error
-nuget install redis-64 -excludeversion || goto :error
-redis-64\tools\redis-server.exe --service-install || goto :error
-redis-64\tools\redis-server.exe --service-start || goto :error
-IF NOT DEFINED SKIPZMQ (
-    nuget install ZeroMQ || goto :error
-)
-IF DEFINED CYBUILD (
-    %BUILD% pip install cython twine || goto :error
-    cython logbook\_speedups.pyx || goto :error
-) ELSE (
-    set DISABLE_LOGBOOK_CEXT=True
-)
-IF DEFINED SKIPZMQ (
-    %BUILD% pip install -e .[dev,execnet,jinja,sqlalchemy,redis] || goto :error
-) ELSE (
-    %BUILD% pip install -e .[all] || goto :error
-)
-REM pypiwin32 can fail, ignore error.
-%BUILD% pip install pypiwin32
-exit /b 0
-
-:error
-exit /b %errorlevel%
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 00000000..a8be3024
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,106 @@
+---
+name: CI
+
+on:
+  push:
+    branches: ["develop", "master"]
+  pull_request:
+    branches: ["develop", "master"]
+  workflow_dispatch:
+
+jobs:
+  linux:
+    name: "Linux (${{ matrix.python-version }})"
+    runs-on: "ubuntu-latest"
+
+    services:
+      redis:
+        image: redis
+        ports:
+          - 6379:6379
+        options: >-
+          --health-cmd "redis-cli ping"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+
+    steps:
+      - uses: "actions/checkout@v3"
+      - uses: "actions/setup-python@v4"
+        with:
+          python-version: "${{ matrix.python-version }}"
+      - name: "Install dependencies"
+        run: |
+          set -xe
+          python -VV
+          python -m site
+          python -m pip install --upgrade pip setuptools wheel
+          python -m pip install --upgrade tox tox-gh-actions
+
+      - name: "Run tox targets for ${{ matrix.python-version }}"
+        run: "python -m tox -- -r aR"
+
+  windows:
+    name: "Windows (${{ matrix.python-version }}, ${{ matrix.arch }})"
+    runs-on: "windows-latest"
+
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+        arch: ["x86", "x64"]
+
+    env:
+      ENABLE_LOGBOOK_NTEVENTLOG_TESTS: "1"
+
+    steps:
+      - uses: "actions/checkout@v3"
+      - uses: "actions/setup-python@v4"
+        with:
+          python-version: "${{ matrix.python-version }}"
+          architecture: "${{ matrix.arch }}"
+
+      - run: python -VV
+      - run: python -m site
+      - run: python -m pip install --upgrade pip setuptools wheel
+      - run: python -m pip install --upgrade tox tox-gh-actions
+
+      - name: "Run tox targets for ${{ matrix.python-version }} on ${{ matrix.arch }}"
+        run: "python -m tox -- -r aR -k 'not redis'"
+
+  macos:
+    name: "macOS (${{ matrix.python-version }})"
+    runs-on: "macos-latest"
+
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+
+    steps:
+      - uses: "actions/checkout@v3"
+      - uses: "actions/setup-python@v4"
+        with:
+          python-version: "${{ matrix.python-version }}"
+      - name: "Install dependencies"
+        run: |
+          set -xe
+          python -VV
+          python -m site
+          python -m pip install --upgrade pip setuptools wheel
+          python -m pip install --upgrade tox tox-gh-actions
+
+      - name: "Run tox targets for ${{ matrix.python-version }}"
+        run: "python -m tox -- -r aR -k 'not redis'"
+
+  pre-commit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+      - uses: pre-commit/action@v3.0.0
diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml
new file mode 100644
index 00000000..852959ce
--- /dev/null
+++ b/.github/workflows/pypi-publish.yml
@@ -0,0 +1,55 @@
+# This is based on pyca/cryptography but we use cibuildwheel
+# https://github.com/pyca/cryptography/blob/50ae9623df9181e5d08bbca0791ae69af4d3d446/.github/workflows/pypi-publish.yml
+name: Publish to PyPI
+
+on:
+  workflow_dispatch:
+    inputs:
+      run_id:
+        description: The run of wheel-builder to use for finding artifacts.
+        required: true
+      environment:
+        description: Which PyPI environment to upload to
+        required: true
+        type: choice
+        options: [testpypi, pypi]
+  workflow_run:
+    workflows: ["Wheel Builder"]
+    types: [completed]
+
+jobs:
+  publish:
+    runs-on: ubuntu-latest
+    # We're not actually verifying that the triggering push event was for a
+    # tag, because github doesn't expose enough information to do so.
+    # wheel-builder.yml currently only has push events for tags.
+    if: github.event_name == 'workflow_dispatch' || (github.event.workflow_run.event == 'push' && github.event.workflow_run.conclusion == 'success')
+    environment: publish
+    permissions:
+      id-token: write
+    steps:
+      - name: Download artifacts
+        uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
+        with:
+          path: artifacts/
+          run_id: ${{ github.event.inputs.run_id || github.event.workflow_run.id }}
+
+      - name: Move artifacts to dist/
+        run: |
+          ls -lR artifacts/
+          mkdir dist
+          mv artifacts/sdist/*.tar.gz dist/
+          mv artifacts/wheels/*.whl dist/
+
+      - name: Publish to pypi.org
+        uses: pypa/gh-action-pypi-publish@f8c70e705ffc13c3b4d1221169b84f12a75d6ca8 # v1.8.8
+        if: github.event_name == 'workflow_run' || (github.event_name == 'workflow_dispatch' && github.event.inputs.environment == 'pypi')
+        with:
+          packages-dir: dist/
+
+      - name: Publish to test.pypi.org
+        uses: pypa/gh-action-pypi-publish@f8c70e705ffc13c3b4d1221169b84f12a75d6ca8 # v1.8.8
+        if: github.event_name == 'workflow_dispatch' && github.event.inputs.environment == 'testpypi'
+        with:
+          repository-url: https://test.pypi.org/legacy/
+          packages-dir: dist/
diff --git a/.github/workflows/wheel-builder.yml b/.github/workflows/wheel-builder.yml
new file mode 100644
index 00000000..0107404a
--- /dev/null
+++ b/.github/workflows/wheel-builder.yml
@@ -0,0 +1,65 @@
+# This is based on pyca/cryptography but we use cibuildwheel
+# https://github.com/pyca/cryptography/blob/50ae9623df9181e5d08bbca0791ae69af4d3d446/.github/workflows/wheel-builder.yml
+name: Wheel Builder
+
+permissions:
+  contents: read
+
+on:
+  workflow_dispatch:
+    inputs:
+      version:
+        description: The Git ref to build
+  # Do not add any non-tag push events without updating pypi-publish.yml. If
+  # you do, it'll upload wheels to PyPI.
+  push:
+    tags:
+      - "*"
+  pull_request:
+    paths:
+      - .github/workflows/wheel-builder.yml
+      - setup.py
+      - pyproject.toml
+
+jobs:
+  sdist:
+    name: Build sdist
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        with:
+          # The tag to build or the tag received by the tag event
+          ref: ${{ github.event.inputs.version || github.ref }}
+          persist-credentials: false
+
+      - run: python -m venv .venv
+      - name: Install Python dependencies
+        run: .venv/bin/pip install -U pip build
+      - name: Make sdist
+        run: .venv/bin/python -m build --sdist
+      - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+        with:
+          name: sdist
+          path: dist/*.tar.gz
+
+  build_wheels:
+    name: Build wheels on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-20.04, windows-2019, macos-11]
+
+    steps:
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        with:
+          # The tag to build or the tag received by the tag event
+          ref: ${{ github.event.inputs.version || github.ref }}
+          persist-credentials: false
+
+      - name: Build wheels
+        uses: pypa/cibuildwheel@f21bb8376a051ffb6cb5604b28ccaef7b90e8ab7 # v2.14.1
+
+      - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+        with:
+          name: wheels
+          path: ./wheelhouse/*.whl
diff --git a/.gitignore b/.gitignore
index 9e999173..9e10a270 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,7 +60,7 @@ target/
 
 # Logbook specific / custom ignores
 .ropeproject
-logbook/_speedups.c
+src/cython/speedups.c
 env*
 .vagrant
 flycheck-*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..2ee38397
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,29 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-toml
+      - id: check-yaml
+      - id: check-added-large-files
+      - id: check-merge-conflict
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v2.7.1
+    hooks:
+      - id: prettier
+        exclude: docs/sheet/.*\.html$
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.7.0
+    hooks:
+      - id: pyupgrade
+        args: [--py37-plus]
+  - repo: https://github.com/timothycrosley/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 00000000..3f5023c6
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,13 @@
+version: 2
+
+build:
+  os: ubuntu-20.04
+  tools:
+    python: "3.11"
+
+python:
+  install:
+    - method: pip
+      path: .
+      extra_requirements:
+        - docs
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index d924fb1d..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-language: python
-dist: xenial
-addons:
-  apt:
-    sources:
-      - chris-lea-redis-server
-      - sourceline: 'ppa:chris-lea/zeromq'
-    packages:
-      - redis-server
-      - libzmq3-dev
-services:
-- redis-server
-python:
-- '2.7'
-- '3.5'
-- '3.6'
-- '3.7'
-before_install:
-  - pip install coveralls
-install:
-- pip install -U pip
-- pip install cython
-- cython logbook/_speedups.pyx
-env:
-- DISABLE_LOGBOOK_CEXT=True
-- CYBUILD=True
-
-script:
-- pip install -e .[all]
-- if [[ $GEVENT == 'True' ]] ; then pip install gevent; fi
-- pytest --cov=logbook -r s tests
-
-matrix:
-  exclude:
-  include:
-  - python: "3.6"
-    env: GEVENT=True CYBUILD=True
-  - python: "2.7"
-    env: GEVENT=True CYBUILD=True
-
-after_success:
-  - coveralls
-
-notifications:
-  email:
-    recipients:
-      - vmalloc@gmail.com
-  irc:
-    channels:
-      - chat.freenode.net#pocoo
-    on_success: change
-    on_failure: always
-    use_notice: true
-    skip_join: true
-deploy:
-  - provider: pypi
-    user: vmalloc
-    password:
-      secure: WFmuAbtBDIkeZArIFQRCwyO1TdvF2PaZpo75r3mFgnY+aWm75cdgjZKoNqVprF/f+v9EsX2kDdQ7ZfuhMLgP8MNziB+ty7579ZDGwh64jGoi+DIoeblAFu5xNAqjvhie540uCE8KySk9s+Pq5EpOA5w18V4zxTw+h6tnBQ0M9cQ=
-    on:
-      python: "3.7"
-      condition: $CYBUILD = 'True'
-      tags: true
-      repo: getlogbook/logbook
-      distributions: "sdist"
diff --git a/AUTHORS b/AUTHORS
index 46b53785..65bb6149 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -17,4 +17,3 @@ Contributors:
 - Raphaël Vinot
 - Rotem Yaari
 - Frazer McLean
-
diff --git a/CHANGES b/CHANGES
index 4f2c3ac4..296cf89b 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,35 @@
 Logbook Changelog
 =================
 
+Version 1.6.0
+-------------
+
+Released on July 30th, 2023
+
+- Dropped support for Python 2.7, 3.5, and 3.6.
+- Switched to a pyproject.toml-based build.
+- Added an nteventlog extra for NTEventLogHandler.
+- Added support for SQLAlchemy 1.4 and 2.0.
+- Fixed various deprecation warnings.
+- The exc_info argument may now be a BaseException instance (thanks Mattijs Ugen).
+- FileHandler now supports path-like objects.
+- Fixed a bug which prevented compilation on Cython 3.
+- Wheels are now generated for more platforms and architectures.
+
+Version 1.5.3
+-------------
+
+Released on October 16th, 2019
+
+- Fixed deprecated imports from the collections module.
+
+Version 1.5.2
+-------------
+
+Released on August 21st, 2019
+
+- No changes
+
 Version 1.5.1
 -------------
@@ -223,7 +252,7 @@ Version 0.4.1
 Released on December 12th. Codename "121212"
 
 - Fixed several outstanding encoding problems, thanks to @dvarazzo.
-- Merged in minor pull requests (see https://github.com/mitsuhiko/logbook/pulls?&state=closed)
+- Merged in minor pull requests (see https://github.com/getlogbook/logbook/pulls?q=is%3Aclosed)
 
 Version 0.4
 -----------
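Two of the 1.6.0 entries above change what callers can pass in. A minimal sketch of both, assuming only the behavior the changelog states (the logger name and file name are hypothetical):

    from pathlib import Path

    from logbook import FileHandler, Logger

    log = Logger("example")

    # 1.6.0: FileHandler accepts path-like objects, not only str paths.
    with FileHandler(Path("example.log")):
        try:
            1 / 0
        except ZeroDivisionError as exc:
            # 1.6.0: exc_info may be a BaseException instance directly.
            log.error("division failed", exc_info=exc)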
diff --git a/MANIFEST.in b/MANIFEST.in
index 85dd8a3b..642f1122 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,6 @@
-include MANIFEST.in Makefile CHANGES logbook/_speedups.c logbook/_speedups.pyx tox.ini LICENSE
-include scripts/test_setup.py
-recursive-include tests *
+include MANIFEST.in Makefile CHANGES src/cython/speedups.pyx tox.ini LICENSE
+exclude src/cython/speedups.c
+graft benchmark
+graft docs
+graft tests
+global-exclude *.pyc
diff --git a/README.md b/README.md
index 7be09b1c..1bd3fcd5 100644
--- a/README.md
+++ b/README.md
@@ -2,30 +2,20 @@
 
-
-
-|                    |                             |
-|--------------------|-----------------------------|
-| Travis             | [![Build Status][ti]][tl]   |
-| AppVeyor           | [![Build Status][ai]][al]   |
-| Supported Versions | ![Supported Versions][vi]   |
-| Latest Version     | [![Latest Version][pi]][pl] |
-| Test Coverage      | [![Test Coverage][ci]][cl]  |
-
+[![Latest Version][version-img]][pypi]
+![Supported Python Versions][pyver-img]
+[![GitHub Actions][gha-img]][gha]
+[![PyPI Downloads][downloads-img]][pypi]
 
 Logbook is a nice logging replacement.
 
 It should be easy to setup, use and configure and support web applications :)
 
-For more information: http://logbook.readthedocs.org
+For more information: https://logbook.readthedocs.org
 
-[ti]: https://secure.travis-ci.org/getlogbook/logbook.svg?branch=master
-[tl]: https://travis-ci.org/getlogbook/logbook
-[ai]: https://ci.appveyor.com/api/projects/status/quu99exa26e06npp?svg=true
-[vi]: https://img.shields.io/badge/python-2.7%2C3.5%2C3.6%2C3.7-green.svg
-[di]: https://img.shields.io/pypi/dm/logbook.svg
-[al]: https://ci.appveyor.com/project/vmalloc/logbook
-[pi]: https://img.shields.io/pypi/v/logbook.svg
-[pl]: https://pypi.org/pypi/Logbook
-[ci]: https://coveralls.io/repos/getlogbook/logbook/badge.svg?branch=master&service=github
-[cl]: https://coveralls.io/github/getlogbook/logbook?branch=master
+[version-img]: https://img.shields.io/pypi/v/logbook.svg
+[pypi]: https://pypi.org/pypi/Logbook
+[gha-img]: https://img.shields.io/github/actions/workflow/status/getlogbook/logbook/main.yml
+[gha]: https://github.com/getlogbook/logbook/actions
+[downloads-img]: https://img.shields.io/pypi/dm/logbook
+[pyver-img]: https://img.shields.io/pypi/pyversions/logbook
diff --git a/Vagrantfile b/Vagrantfile
deleted file mode 100644
index bacb4642..00000000
--- a/Vagrantfile
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-PYTHON_VERSIONS = ["python2.6", "python2.7", "python3.3"]
-
-Vagrant::Config.run do |config|
-  config.vm.define :box do |config|
-    config.vm.box = "precise64"
-    config.vm.box_url = "http://files.vagrantup.com/precise64.box"
-    config.vm.host_name = "box"
-    config.vm.provision :shell, :inline => "sudo apt-get -y update"
-    config.vm.provision :shell, :inline => "sudo apt-get install -y python-software-properties"
-    config.vm.provision :shell, :inline => "sudo add-apt-repository -y ppa:fkrull/deadsnakes"
-    config.vm.provision :shell, :inline => "sudo apt-get update"
-    PYTHON_VERSIONS.each { |python_version|
-      config.vm.provision :shell, :inline => "sudo apt-get install -y " + python_version + " " + python_version + "-dev"
-    }
-    config.vm.provision :shell, :inline => "sudo apt-get install -y libzmq-dev wget libbluetooth-dev libsqlite3-dev"
-    config.vm.provision :shell, :inline => "wget http://python-distribute.org/distribute_setup.py -O /tmp/distribute_setup.py"
-    PYTHON_VERSIONS.each { |python_executable|
-      config.vm.provision :shell, :inline => python_executable + " /tmp/distribute_setup.py"
-    }
-    config.vm.provision :shell, :inline => "sudo easy_install tox==1.2"
-    config.vm.provision :shell, :inline => "sudo easy_install virtualenv==1.6.4"
-  end
-end
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 6f7ae9cc..00000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-cache:
-  - C:\Users\appveyor\AppData\Local\pip\Cache\wheels
-
-environment:
-  global:
-    # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
-    # /E:ON and /V:ON options are not enabled in the batch script intepreter
-    # See: http://stackoverflow.com/a/13751649/163740
-    BUILD: "cmd /E:ON /V:ON /C .\\.appveyor\\build.cmd"
-    PYPI_USERNAME:
-      secure: ixvjwUN/HsSfGkU3OvtQ8Q==
-    PYPI_PASSWORD:
-      secure: KOr+oEHZJmo1el3bT+ivmQ==
-    ENABLE_LOGBOOK_NTEVENTLOG_TESTS: "TRUE"
-
-  matrix:
-
-    - PYTHON: "C:\\Python27"
-    - PYTHON: "C:\\Python27"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python27-x64"
-    - PYTHON: "C:\\Python27-x64"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python35"
-    - PYTHON: "C:\\Python35"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python35-x64"
-    - PYTHON: "C:\\Python35-x64"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python36"
-    - PYTHON: "C:\\Python36"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python36-x64"
-    - PYTHON: "C:\\Python36-x64"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python37"
-    - PYTHON: "C:\\Python37"
-      CYBUILD: "TRUE"
-
-    - PYTHON: "C:\\Python37-x64"
-    - PYTHON: "C:\\Python37-x64"
-      CYBUILD: "TRUE"
-
-init:
-  - echo %PYTHON%
-  - set PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
-
-install:
-  - ".appveyor\\prepare.bat"
-
-build: off
-
-test_script:
-  - py.test -r s tests
-
-after_test:
-  - ".appveyor\\after_test.bat"
-
-artifacts:
-  # Archive the generated packages in the ci.appveyor.com build report.
-  - path: dist\*.whl
-
-deploy:
-  description: ''
-  provider: GitHub
-  auth_token:
-    secure: 0yLUo/V+wwSvSFk9nBW/77RN9iTjJA1B5p/TM1XgVLPPFEZWkH756jyJ0FOmtJPt
-  artifact: /.*\.whl/
-  draft: true
-  prerelease: false
-  on:
-    appveyor_repo_tag: true
diff --git a/benchmark/bench_disabled_introspection.py b/benchmark/bench_disabled_introspection.py
index d0693b1c..87d4fe83 100644
--- a/benchmark/bench_disabled_introspection.py
+++ b/benchmark/bench_disabled_introspection.py
@@ -1,8 +1,7 @@
 """Tests with frame introspection disabled"""
-from logbook import Logger, NullHandler, Flags
+from logbook import Flags, Logger, NullHandler
 
-
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 class DummyHandler(NullHandler):
@@ -12,5 +11,5 @@ class DummyHandler(NullHandler):
 def run():
     with Flags(introspection=False):
         with DummyHandler() as handler:
-            for x in xrange(500):
-                log.warning('this is not handled')
+            for x in range(500):
+                log.warning("this is not handled")
diff --git a/benchmark/bench_disabled_logger.py b/benchmark/bench_disabled_logger.py
index e7d50e26..3572da60 100644
--- a/benchmark/bench_disabled_logger.py
+++ b/benchmark/bench_disabled_logger.py
@@ -1,11 +1,10 @@
 """Tests with the whole logger disabled"""
 from logbook import Logger
 
-
-log = Logger('Test logger')
+log = Logger("Test logger")
 log.disabled = True
 
 
 def run():
-    for x in xrange(500):
-        log.warning('this is not handled')
+    for x in range(500):
+        log.warning("this is not handled")
diff --git a/benchmark/bench_enabled_introspection.py b/benchmark/bench_enabled_introspection.py
index 67b62ec3..11013bfd 100644
--- a/benchmark/bench_enabled_introspection.py
+++ b/benchmark/bench_enabled_introspection.py
@@ -1,8 +1,7 @@
 """Tests with stack frame introspection enabled"""
-from logbook import Logger, NullHandler, Flags
+from logbook import Flags, Logger, NullHandler
 
-
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 class DummyHandler(NullHandler):
@@ -12,5 +11,5 @@ class DummyHandler(NullHandler):
 def run():
     with Flags(introspection=True):
         with DummyHandler() as handler:
-            for x in xrange(500):
-                log.warning('this is not handled')
+            for x in range(500):
+                log.warning("this is not handled")
diff --git a/benchmark/bench_file_handler.py b/benchmark/bench_file_handler.py
index bed578ed..6b2c0cf8 100644
--- a/benchmark/bench_file_handler.py
+++ b/benchmark/bench_file_handler.py
@@ -1,13 +1,13 @@
 """Benchmarks the file handler"""
-from logbook import Logger, FileHandler
 from tempfile import NamedTemporaryFile
 
+from logbook import FileHandler, Logger
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     f = NamedTemporaryFile()
     with FileHandler(f.name) as handler:
-        for x in xrange(500):
-            log.warning('this is handled')
+        for x in range(500):
+            log.warning("this is handled")
diff --git a/benchmark/bench_file_handler_unicode.py b/benchmark/bench_file_handler_unicode.py
index 255b8aac..10059df9 100644
--- a/benchmark/bench_file_handler_unicode.py
+++ b/benchmark/bench_file_handler_unicode.py
@@ -1,13 +1,13 @@
 """Benchmarks the file handler with unicode"""
-from logbook import Logger, FileHandler
 from tempfile import NamedTemporaryFile
 
+from logbook import FileHandler, Logger
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     f = NamedTemporaryFile()
     with FileHandler(f.name) as handler:
-        for x in xrange(500):
-            log.warning(u'this is handled \x6f')
+        for x in range(500):
+            log.warning("this is handled \x6f")
diff --git a/benchmark/bench_logger_creation.py b/benchmark/bench_logger_creation.py
index d37cacdb..0019084c 100644
--- a/benchmark/bench_logger_creation.py
+++ b/benchmark/bench_logger_creation.py
@@ -3,5 +3,5 @@
 
 
 def run():
-    for x in xrange(500):
-        Logger('Test')
+    for x in range(500):
+        Logger("Test")
diff --git a/benchmark/bench_logger_level_low.py b/benchmark/bench_logger_level_low.py
index a3e13422..1ec47b46 100644
--- a/benchmark/bench_logger_level_low.py
+++ b/benchmark/bench_logger_level_low.py
@@ -1,14 +1,14 @@
 """Benchmarks too low logger levels"""
-from logbook import Logger, StreamHandler, ERROR
-from cStringIO import StringIO
+from io import StringIO
 
+from logbook import ERROR, Logger, StreamHandler
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 log.level = ERROR
 
 
 def run():
     out = StringIO()
     with StreamHandler(out):
-        for x in xrange(500):
-            log.warning('this is not handled')
+        for x in range(500):
+            log.warning("this is not handled")
diff --git a/benchmark/bench_logging_file_handler.py b/benchmark/bench_logging_file_handler.py
index 17b1a7e5..37be718c 100644
--- a/benchmark/bench_logging_file_handler.py
+++ b/benchmark/bench_logging_file_handler.py
@@ -1,14 +1,13 @@
 """Tests logging file handler in comparison"""
-from logging import getLogger, FileHandler
+from logging import FileHandler, getLogger
 from tempfile import NamedTemporaryFile
 
-
-log = getLogger('Testlogger')
+log = getLogger("Testlogger")
 
 
 def run():
     f = NamedTemporaryFile()
     handler = FileHandler(f.name)
     log.addHandler(handler)
-    for x in xrange(500):
-        log.warning('this is handled')
+    for x in range(500):
+        log.warning("this is handled")
diff --git a/benchmark/bench_logging_file_handler_unicode.py b/benchmark/bench_logging_file_handler_unicode.py
index 7bb87c16..7ff319f1 100644
--- a/benchmark/bench_logging_file_handler_unicode.py
+++ b/benchmark/bench_logging_file_handler_unicode.py
@@ -1,14 +1,13 @@
 """Tests logging file handler in comparison"""
-from logging import getLogger, FileHandler
+from logging import FileHandler, getLogger
 from tempfile import NamedTemporaryFile
 
-
-log = getLogger('Testlogger')
+log = getLogger("Testlogger")
 
 
 def run():
     f = NamedTemporaryFile()
     handler = FileHandler(f.name)
     log.addHandler(handler)
-    for x in xrange(500):
-        log.warning(u'this is handled \x6f')
+    for x in range(500):
+        log.warning("this is handled \x6f")
diff --git a/benchmark/bench_logging_logger_creation.py b/benchmark/bench_logging_logger_creation.py
index 0d877e03..3004f299 100644
--- a/benchmark/bench_logging_logger_creation.py
+++ b/benchmark/bench_logging_logger_creation.py
@@ -1,11 +1,10 @@
 """Test with no handler active"""
 from logging import getLogger
 
-
 root_logger = getLogger()
 
 
 def run():
-    for x in xrange(500):
-        getLogger('Test')
-        del root_logger.manager.loggerDict['Test']
+    for x in range(500):
+        getLogger("Test")
+        del root_logger.manager.loggerDict["Test"]
diff --git a/benchmark/bench_logging_logger_level_low.py b/benchmark/bench_logging_logger_level_low.py
index 4f065966..4e018adc 100644
--- a/benchmark/bench_logging_logger_level_low.py
+++ b/benchmark/bench_logging_logger_level_low.py
@@ -1,9 +1,8 @@
 """Tests with a logging handler becoming a noop for comparison"""
-from logging import getLogger, StreamHandler, ERROR
-from cStringIO import StringIO
+from io import StringIO
+from logging import ERROR, StreamHandler, getLogger
 
-
-log = getLogger('Testlogger')
+log = getLogger("Testlogger")
 log.setLevel(ERROR)
 
 
@@ -11,5 +10,5 @@ def run():
     out = StringIO()
     handler = StreamHandler(out)
     log.addHandler(handler)
-    for x in xrange(500):
-        log.warning('this is not handled')
+    for x in range(500):
+        log.warning("this is not handled")
diff --git a/benchmark/bench_logging_noop.py b/benchmark/bench_logging_noop.py
index a3a8099d..ad942e13 100644
--- a/benchmark/bench_logging_noop.py
+++ b/benchmark/bench_logging_noop.py
@@ -1,9 +1,8 @@
 """Tests with a logging handler becoming a noop for comparison"""
-from logging import getLogger, StreamHandler, ERROR
-from cStringIO import StringIO
+from io import StringIO
+from logging import ERROR, StreamHandler, getLogger
 
-
-log = getLogger('Testlogger')
+log = getLogger("Testlogger")
 
 
 def run():
@@ -11,5 +10,5 @@ def run():
     handler = StreamHandler(out)
     handler.setLevel(ERROR)
     log.addHandler(handler)
-    for x in xrange(500):
-        log.warning('this is not handled')
+    for x in range(500):
+        log.warning("this is not handled")
diff --git a/benchmark/bench_logging_noop_filter.py b/benchmark/bench_logging_noop_filter.py
index 6043c480..13a6de5d 100644
--- a/benchmark/bench_logging_noop_filter.py
+++ b/benchmark/bench_logging_noop_filter.py
@@ -1,9 +1,8 @@
 """Tests with a filter disabling a handler for comparsion in logging"""
-from logging import getLogger, StreamHandler, Filter
-from cStringIO import StringIO
+from io import StringIO
+from logging import Filter, StreamHandler, getLogger
 
-
-log = getLogger('Testlogger')
+log = getLogger("Testlogger")
 
 
 class DisableFilter(Filter):
@@ -16,5 +15,5 @@ def run():
     handler = StreamHandler(out)
     handler.addFilter(DisableFilter())
     log.addHandler(handler)
-    for x in xrange(500):
-        log.warning('this is not handled')
+    for x in range(500):
+        log.warning("this is not handled")
diff --git a/benchmark/bench_logging_stream_handler.py b/benchmark/bench_logging_stream_handler.py
index a21d41f5..7935b800 100644
--- a/benchmark/bench_logging_stream_handler.py
+++ b/benchmark/bench_logging_stream_handler.py
@@ -1,14 +1,13 @@
 """Tests the stream handler in logging"""
+from io import StringIO
 from logging import Logger, StreamHandler
-from cStringIO import StringIO
 
-
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     out = StringIO()
     log.addHandler(StreamHandler(out))
-    for x in xrange(500):
-        log.warning('this is not handled')
-    assert out.getvalue().count('\n') == 500
+    for x in range(500):
+        log.warning("this is not handled")
+    assert out.getvalue().count("\n") == 500
diff --git a/benchmark/bench_noop.py b/benchmark/bench_noop.py
index 65031161..3b9f6ba0 100644
--- a/benchmark/bench_noop.py
+++ b/benchmark/bench_noop.py
@@ -1,15 +1,15 @@
 """Test with no handler active"""
-from logbook import Logger, StreamHandler, NullHandler, ERROR
-from cStringIO import StringIO
+from io import StringIO
 
+from logbook import ERROR, Logger, NullHandler, StreamHandler
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     out = StringIO()
     with NullHandler():
         with StreamHandler(out, level=ERROR) as handler:
-            for x in xrange(500):
-                log.warning('this is not handled')
+            for x in range(500):
+                log.warning("this is not handled")
     assert not out.getvalue()
diff --git a/benchmark/bench_noop_filter.py b/benchmark/bench_noop_filter.py
index 220adcd4..38a7be04 100644
--- a/benchmark/bench_noop_filter.py
+++ b/benchmark/bench_noop_filter.py
@@ -1,14 +1,14 @@
-from logbook import Logger, StreamHandler, NullHandler
-from cStringIO import StringIO
+from io import StringIO
 
+from logbook import Logger, NullHandler, StreamHandler
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     out = StringIO()
     with NullHandler():
         with StreamHandler(out, filter=lambda r, h: False) as handler:
-            for x in xrange(500):
-                log.warning('this is not handled')
+            for x in range(500):
+                log.warning("this is not handled")
     assert not out.getvalue()
diff --git a/benchmark/bench_noop_filter_on_handler.py b/benchmark/bench_noop_filter_on_handler.py
index fd9714ad..255a42c8 100644
--- a/benchmark/bench_noop_filter_on_handler.py
+++ b/benchmark/bench_noop_filter_on_handler.py
@@ -1,9 +1,9 @@
 """Like the filter test, but with the should_handle implemented"""
-from logbook import Logger, StreamHandler, NullHandler
-from cStringIO import StringIO
+from io import StringIO
 
+from logbook import Logger, NullHandler, StreamHandler
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 class CustomStreamHandler(StreamHandler):
@@ -15,6 +15,6 @@ def run():
     out = StringIO()
     with NullHandler():
         with CustomStreamHandler(out) as handler:
-            for x in xrange(500):
-                log.warning('this is not handled')
+            for x in range(500):
+                log.warning("this is not handled")
     assert not out.getvalue()
diff --git a/benchmark/bench_redirect_from_logging.py b/benchmark/bench_redirect_from_logging.py
index 8835925b..1b4ab147 100644
--- a/benchmark/bench_redirect_from_logging.py
+++ b/benchmark/bench_redirect_from_logging.py
@@ -1,17 +1,17 @@
 """Tests redirects from logging to logbook"""
+from io import StringIO
 from logging import getLogger
+
 from logbook import StreamHandler
 from logbook.compat import redirect_logging
-from cStringIO import StringIO
-
 
 redirect_logging()
-log = getLogger('Test logger')
+log = getLogger("Test logger")
 
 
 def run():
     out = StringIO()
     with StreamHandler(out):
-        for x in xrange(500):
-            log.warning('this is not handled')
-    assert out.getvalue().count('\n') == 500
+        for x in range(500):
+            log.warning("this is not handled")
+    assert out.getvalue().count("\n") == 500
diff --git a/benchmark/bench_redirect_to_logging.py b/benchmark/bench_redirect_to_logging.py
index 4967855a..b19a2961 100644
--- a/benchmark/bench_redirect_to_logging.py
+++ b/benchmark/bench_redirect_to_logging.py
@@ -1,16 +1,16 @@
 """Tests redirects from logging to logbook"""
-from logging import getLogger, StreamHandler
-from logbook.compat import LoggingHandler
-from cStringIO import StringIO
+from io import StringIO
+from logging import StreamHandler, getLogger
 
+from logbook.compat import LoggingHandler
 
-log = getLogger('Test logger')
+log = getLogger("Test logger")
 
 
 def run():
     out = StringIO()
     log.addHandler(StreamHandler(out))
     with LoggingHandler():
-        for x in xrange(500):
-            log.warning('this is not handled')
-    assert out.getvalue().count('\n') == 500
+        for x in range(500):
+            log.warning("this is not handled")
+    assert out.getvalue().count("\n") == 500
diff --git a/benchmark/bench_stack_manipulation.py b/benchmark/bench_stack_manipulation.py
index 0f546ea9..671262a7 100644
--- a/benchmark/bench_stack_manipulation.py
+++ b/benchmark/bench_stack_manipulation.py
@@ -1,8 +1,8 @@
 """Tests basic stack manipulation performance"""
-from logbook import Handler, NullHandler, StreamHandler, FileHandler, \
-    ERROR, WARNING
+from io import StringIO
 from tempfile import NamedTemporaryFile
-from cStringIO import StringIO
+
+from logbook import ERROR, WARNING, FileHandler, Handler, NullHandler, StreamHandler
 
 
 def run():
@@ -11,5 +11,5 @@ def run():
     with NullHandler():
         with StreamHandler(out, level=WARNING):
             with FileHandler(f.name, level=ERROR):
-                for x in xrange(100):
+                for x in range(100):
                     list(Handler.stack_manager.iter_context_objects())
diff --git a/benchmark/bench_stream_handler.py b/benchmark/bench_stream_handler.py
index 9449d2ee..f3ae8753 100644
--- a/benchmark/bench_stream_handler.py
+++ b/benchmark/bench_stream_handler.py
@@ -1,14 +1,14 @@
 """Tests the stream handler"""
-from logbook import Logger, StreamHandler
-from cStringIO import StringIO
+from io import StringIO
 
+from logbook import Logger, StreamHandler
 
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     out = StringIO()
     with StreamHandler(out) as handler:
-        for x in xrange(500):
-            log.warning('this is not handled')
-        assert out.getvalue().count('\n') == 500
+        for x in range(500):
+            log.warning("this is not handled")
+        assert out.getvalue().count("\n") == 500
diff --git a/benchmark/bench_test_handler.py b/benchmark/bench_test_handler.py
index 68b4de14..aa17dc22 100644
--- a/benchmark/bench_test_handler.py
+++ b/benchmark/bench_test_handler.py
@@ -1,11 +1,10 @@
 """Tests the test handler"""
 from logbook import Logger, TestHandler
 
-
-log = Logger('Test logger')
+log = Logger("Test logger")
 
 
 def run():
     with TestHandler() as handler:
-        for x in xrange(500):
-            log.warning('this is not handled')
+        for x in range(500):
+            log.warning("this is not handled")
diff --git a/benchmark/run.py b/benchmark/run.py
index da2e3e08..3544dba5 100644
--- a/benchmark/run.py
+++ b/benchmark/run.py
@@ -2,20 +2,20 @@
 """
 Runs the benchmarks
 """
-from __future__ import print_function
-import sys
 import os
 import re
+import sys
 from subprocess import Popen
 
 try:
     from pkg_resources import get_distribution
-    version = get_distribution('Logbook').version
+
+    version = get_distribution("Logbook").version
 except Exception:
-    version = 'unknown version'
+    version = "unknown version"
 
-_filename_re = re.compile(r'^bench_(.*?)\.py$')
+_filename_re = re.compile(r"^bench_(.*?)\.py$")
 bench_directory = os.path.abspath(os.path.dirname(__file__))
 
 
@@ -25,38 +25,49 @@ def list_benchmarks():
         match = _filename_re.match(name)
         if match is not None:
             result.append(match.group(1))
-    result.sort(key=lambda x: (x.startswith('logging_'), x.lower()))
+    result.sort(key=lambda x: (x.startswith("logging_"), x.lower()))
     return result
 
 
 def run_bench(name, use_gevent=False):
-    sys.stdout.write('%-32s' % name)
+    sys.stdout.write("%-32s" % name)
     sys.stdout.flush()
-    Popen([sys.executable, '-mtimeit', '-s',
-           'from bench_%s import run' % name,
-           'from logbook.concurrency import enable_gevent',
-           'enable_gevent()' if use_gevent else '',
-           'run()']).wait()
+    Popen(
+        [
+            sys.executable,
+            "-mtimeit",
+            "-s",
+            "from bench_%s import run" % name,
+            "from logbook.concurrency import enable_gevent",
+            "enable_gevent()" if use_gevent else "",
+            "run()",
+        ]
+    ).wait()
 
 
 def bench_wrapper(use_gevent=False):
-    print('=' * 80)
-    print('Running benchmark with Logbook %s (gevent enabled=%s)' % (version, use_gevent))
-    print('-' * 80)
+    print("=" * 80)
+    print(
+        "Running benchmark with Logbook {} (gevent enabled={})".format(
+            version, use_gevent
+        )
+    )
+    print("-" * 80)
     os.chdir(bench_directory)
     for bench in list_benchmarks():
         run_bench(bench, use_gevent)
-    print('-' * 80)
+    print("-" * 80)
 
 
 def main():
     bench_wrapper(False)
     try:
        import gevent
+
        bench_wrapper(True)
     except ImportError:
         pass
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
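run.py discovers the bench_*.py files and times each one's run() in a python -mtimeit subprocess. Running a single benchmark by hand works out to roughly the following (a sketch; it assumes the current directory is benchmark/ so that bench_noop imports):

    import timeit

    # Equivalent of: python -mtimeit -s "from bench_noop import run" "run()"
    total = timeit.timeit("run()", setup="from bench_noop import run", number=10)
    print("bench_noop: %.6f sec per call" % (total / 10))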
diff --git a/docs/api/base.rst b/docs/api/base.rst
index 547430a1..c7793eea 100644
--- a/docs/api/base.rst
+++ b/docs/api/base.rst
@@ -3,7 +3,7 @@ Core Interface
 
 This implements the core interface.
 
-.. module:: logbook
+.. currentmodule:: logbook
 
 .. autoclass:: Logger
    :members:
diff --git a/docs/api/compat.rst b/docs/api/compat.rst
index 15ca267d..c6d738e7 100644
--- a/docs/api/compat.rst
+++ b/docs/api/compat.rst
@@ -4,7 +4,7 @@ Compatibility
 This documents compatibility support with existing systems such as
 :mod:`logging` and :mod:`warnings`.
 
-.. module:: logbook.compat
+.. currentmodule:: logbook.compat
 
 Logging Compatibility
 ---------------------
diff --git a/docs/api/handlers.rst b/docs/api/handlers.rst
index ac27554a..f4690af0 100644
--- a/docs/api/handlers.rst
+++ b/docs/api/handlers.rst
@@ -6,7 +6,7 @@ handlers.
 There are additional handlers for special purposes in the
 :mod:`logbook.more`, :mod:`logbook.ticketing` and :mod:`logbook.queues`
 modules.
 
-.. module:: logbook
+.. currentmodule:: logbook
 
 Base Interface
 --------------
diff --git a/docs/api/internal.rst b/docs/api/internal.rst
index c1aa0d04..9442b31f 100644
--- a/docs/api/internal.rst
+++ b/docs/api/internal.rst
@@ -4,7 +4,7 @@ Internal API
 This documents the internal API that might be useful for more
 advanced setups or custom handlers.
 
-.. module:: logbook.base
+.. currentmodule:: logbook.base
 
 .. autofunction:: dispatch_record
 
@@ -18,7 +18,7 @@ setups or custom handlers.
    :members:
    :inherited-members:
 
-.. module:: logbook.handlers
+.. currentmodule:: logbook.handlers
 
 .. autoclass:: StringFormatterHandlerMixin
    :members:
diff --git a/docs/api/more.rst b/docs/api/more.rst
index 738b9953..eb06aecc 100644
--- a/docs/api/more.rst
+++ b/docs/api/more.rst
@@ -6,7 +6,7 @@ beyond the scope of Logbook itself or depend on external libraries.
 Additionally there are some handlers in :mod:`logbook.ticketing`,
 :mod:`logbook.queues` and :mod:`logbook.notifiers`.
 
-.. module:: logbook.more
+.. currentmodule:: logbook.more
 
 Tagged Logging
 --------------
diff --git a/docs/api/notifiers.rst b/docs/api/notifiers.rst
index 8a1a4048..4dec4c25 100644
--- a/docs/api/notifiers.rst
+++ b/docs/api/notifiers.rst
@@ -8,7 +8,7 @@ that depend on external libraries.
 The more module implements special handlers and other things that are
 beyond the scope of Logbook itself or depend on external libraries.
 
-.. module:: logbook.notifiers
+.. currentmodule:: logbook.notifiers
 
 .. autofunction:: create_notification_handler
diff --git a/docs/api/queues.rst b/docs/api/queues.rst
index 3e961b9a..b7ab8a3d 100644
--- a/docs/api/queues.rst
+++ b/docs/api/queues.rst
@@ -6,7 +6,7 @@ system.
 This is useful for distributed setups where you want multiple processes
 to log to the same backend. Currently supported are ZeroMQ as well as
 the :mod:`multiprocessing` :class:`~multiprocessing.Queue` class.
 
-.. module:: logbook.queues
+.. currentmodule:: logbook.queues
 
 ZeroMQ
 ------
diff --git a/docs/api/ticketing.rst b/docs/api/ticketing.rst
index 5628e97d..e39359a5 100644
--- a/docs/api/ticketing.rst
+++ b/docs/api/ticketing.rst
@@ -6,7 +6,7 @@ log records are categorized by location and for every emitted log record a
 count is added. That way you know how often certain messages are triggered,
 at what times and when the last occurrence was.
 
-.. module:: logbook.ticketing
+.. currentmodule:: logbook.ticketing
 
 .. autoclass:: TicketingBaseHandler
    :members:
diff --git a/docs/api/utilities.rst b/docs/api/utilities.rst
index 22d35e33..69afefd6 100644
--- a/docs/api/utilities.rst
+++ b/docs/api/utilities.rst
@@ -6,7 +6,7 @@ Misc. Utilities
 
 This documents general purpose utility functions available in Logbook.
 
-.. module:: logbook
+.. currentmodule:: logbook
 
 .. autofunction:: debug
 
@@ -33,7 +33,7 @@ This documents general purpose utility functions available in Logbook.
 Slow Operations Logging
 -----------------------
 
-.. module:: logbook.utils
+.. currentmodule:: logbook.utils
 
 .. autofunction:: logged_if_slow
 
@@ -43,4 +43,3 @@ Deprecations
 
 .. autofunction:: deprecated
 .. autofunction:: suppressed_deprecations
-
diff --git a/docs/conf.py b/docs/conf.py
index aa90731f..00f8cc94 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,228 +1,52 @@
-# -*- coding: utf-8 -*-
+# Configuration file for the Sphinx documentation builder.
 #
-# Logbook documentation build configuration file, created by
-# sphinx-quickstart on Fri Jul 23 16:54:49 2010.
-#
-# This file is execfile()d with the current directory set to its containing
-# dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
 
 import sys
-import os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.extend((os.path.abspath('.'), os.path.abspath('..')))
-
-# -- General configuration ----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Logbook'
-copyright = u'2010, Armin Ronacher, Georg Brandl'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-with open(os.path.join(os.path.dirname(__file__), "..", "logbook", "__version__.py")) as version_file:
-    # can't use import here...
-    version = release = version_file.read().strip().split("=")[1].strip()[1:-1]
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-# language = None
+if sys.version_info < (3, 8):
+    from importlib_metadata import distribution
+else:
+    from importlib.metadata import distribution
 
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
 
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+project = "Logbook"
+project_copyright = "2010, Armin Ronacher, Georg Brandl"
+version = release = distribution("Logbook").version
 
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
 
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.viewcode",
+]
+templates_path = ["_templates"]
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+pygments_style = "sphinx"
 
-# -- Options for HTML output --------------------------------------------------
 
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'sheet'
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
 
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
+html_theme = "sheet"
 html_theme_options = {
-    'nosidebar': True,
+    "nosidebar": True,
 }
-
-# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ['.']
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
+html_theme_path = ["."]
 html_title = "Logbook"
-
-# A shorter title for the navigation bar. Default is the same as html_title.
 html_short_title = "Logbook " + release
+html_static_path = ["_static"]
 
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# html_add_permalinks = ''
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'Logbookdoc'
-
-
-# -- Options for LaTeX output -------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-# latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-# latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author,
-# documentclass [howto/manual]).
-latex_documents = [
-    ('index', 'Logbook.tex', u'Logbook Documentation',
-     u'Armin Ronacher, Georg Brandl', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Additional stuff for the LaTeX preamble.
-# latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output -------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'logbook', u'Logbook Documentation',
-     [u'Armin Ronacher, Georg Brandl'], 1)
-]
+# -- Extension configuration -------------------------------------------------
 
 intersphinx_mapping = {
-    'http://docs.python.org': None
+    "python": ("https://docs.python.org/3", None),
 }
diff --git a/docs/index.rst b/docs/index.rst
index 9d40b5d0..6b86e5e2 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -7,7 +7,7 @@ in mind and the idea to make logging fun:
 
 >>> from logbook import Logger, StreamHandler
 >>> import sys
->>> StreamHandler(sys.stdout).push_application()
+>>> StreamHandler(sys.stdout).push_application()
 >>> log = Logger('Logbook')
 >>> log.info('Hello, World!')
 [2015-10-05 18:55:56.937141] INFO: Logbook: Hello, World!
@@ -47,10 +47,9 @@ Project Information
 .. cssclass:: toctree-l1
 
 * `Download from PyPI`_
-* `Master repository on GitHub`_
+* `GitHub repository`_
 * `Mailing list`_
-* IRC: ``#pocoo`` on freenode
 
-.. _Download from PyPI: https://pypi.org/pypi/Logbook
-.. _Master repository on GitHub: https://github.com/getlogbook/logbook
-.. _Mailing list: http://groups.google.com/group/pocoo-libs
+.. _Download from PyPI: https://pypi.org/project/Logbook
+.. _GitHub repository: https://github.com/getlogbook/logbook
+.. _Mailing list: https://groups.google.com/g/pocoo-libs
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index dc914ee9..2782b84a 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -8,7 +8,7 @@ class, create yourself a logger and you are set:
 
 >>> from logbook import Logger, StreamHandler
 >>> import sys
->>> StreamHandler(sys.stdout).push_application()
+>>> StreamHandler(sys.stdout).push_application()
 >>> log = Logger('My Awesome Logger')
 >>> log.warn('This is too cool for stdlib')
 [2015-10-05 19:02:03.575723] WARNING: My Awesome Logger: This is too cool for stdlib
@@ -71,15 +71,13 @@ On top of those there are a couple of handlers for special use cases:
 * :class:`logbook.notifiers.GrowlHandler` and
   :class:`logbook.notifiers.LibNotifyHandler` for logging to the OS X
   Growl or the linux notification daemon.
-* :class:`logbook.notifiers.BoxcarHandler` for logging to `boxcar`_.
+* :class:`logbook.notifiers.BoxcarHandler` for logging to boxcar.io.
 * :class:`logbook.more.TwitterHandler` for logging to twitter.
 * :class:`logbook.more.ExternalApplicationHandler` for logging to an
   external application such as the OS X ``say`` command.
 * :class:`logbook.ticketing.TicketingHandler` for creating tickets from
   log records in a database or other data store.
 
-.. _boxcar: http://boxcar.io/
-
 Registering Handlers
 --------------------
diff --git a/docs/sheet/layout.html b/docs/sheet/layout.html
index 13036145..33b71a1a 100644
--- a/docs/sheet/layout.html
+++ b/docs/sheet/layout.html
@@ -11,7 +11,6 @@
diff --git a/docs/ticketing.rst b/docs/ticketing.rst
index 0252ad3a..dde6b5f1 100644
--- a/docs/ticketing.rst
+++ b/docs/ticketing.rst
@@ -65,4 +65,4 @@ Alternative backends can be swapped in by providing the `backend` parameter.
 There is a second implementation of a backend that is using MongoDB:
 :class:`~logbook.ticketing.MongoDBBackend`.
 
-.. _SQLAlchemy: http://sqlalchemy.org/
+.. _SQLAlchemy: https://www.sqlalchemy.org/
diff --git a/logbook/__version__.py b/logbook/__version__.py
deleted file mode 100644
index a06ff4e0..00000000
--- a/logbook/__version__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "1.5.3"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..d6df1f37
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,58 @@
+[build-system]
+requires = ["setuptools", "Cython; python_implementation == 'CPython'"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "Logbook"
+license = { text = "BSD-3-Clause" }
+authors = [
+    { name = "Armin Ronacher", email = "armin.ronacher@active-4.com" },
+    { name = "Georg Brandl" },
+]
+description = "A logging replacement for Python"
+readme = "README.md"
+maintainers = [
+    { name = "Frazer McLean", email = "frazer@frazermclean.co.uk" },
+]
+classifiers = [
+    "Programming Language :: Python :: 3 :: Only",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+]
+requires-python = ">=3.7"
+dynamic = ["version"]
+
+[project.urls]
+Documentation = "https://logbook.readthedocs.io"
+"Source Code" = "https://github.com/getlogbook/logbook"
+
+[project.optional-dependencies]
+test = ["pytest>=6", "pytest-rerunfailures"]
+dev = ["Logbook[test]", "tox>=4"]
+execnet = ["execnet>=1.0.9"]
+sqlalchemy = ["sqlalchemy>=1.4"]
+redis = ["redis"]
+zmq = ["pyzmq"]
+jinja = ["Jinja2"]
+compression = ["brotli"]
+all = ["Logbook[execnet,sqlalchemy,redis,zmq,jinja,compression,nteventlog]"]
+nteventlog = ["pywin32; platform_system == 'Windows'"]
+docs = ["Sphinx", "importlib_metadata; python_version < '3.8'"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+
+[tool.isort]
+profile = "black"
+
+[tool.cibuildwheel]
+skip = "pp*"
+
+[tool.cibuildwheel.macos]
+archs = ["x86_64", "universal2", "arm64"]
+
+[tool.cibuildwheel.linux]
+archs = ["x86_64"]
diff --git a/scripts/make-release.py b/scripts/make-release.py
deleted file mode 100644
index 23cbdc7c..00000000
--- a/scripts/make-release.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-    make-release
-    ~~~~~~~~~~~~
-
-    Helper script that performs a release. Does pretty much everything
-    automatically for us.
-
-    :copyright: (c) 2011 by Armin Ronacher.
-    :license: BSD, see LICENSE for more details.
-"""
-import sys
-import os
-import re
-import argparse
-from datetime import datetime, date
-from subprocess import Popen, PIPE
-
-_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
-
-
-def parse_changelog():
-    with open('CHANGES') as f:
-        lineiter = iter(f)
-        for line in lineiter:
-            match = re.search('^Version\s+(.*)', line.strip())
-            if match is None:
-                continue
-            version = match.group(1).strip()
-            if lineiter.next().count('-') != len(match.group(0)):
-                continue
-            while 1:
-                change_info = lineiter.next().strip()
-                if change_info:
-                    break
-
-            match = re.search(r'released on (\w+\s+\d+\w+\s+\d+)'
-                              r'(?:, codename (.*))?(?i)', change_info)
-            if match is None:
-                continue
-
-            datestr, codename = match.groups()
-            return version, parse_date(datestr), codename
-
-
-def bump_version(version):
-    try:
-        parts = map(int, version.split('.'))
-    except ValueError:
-        fail('Current version is not numeric')
-    parts[-1] += 1
-    return '.'.join(map(str, parts))
-
-
-def parse_date(string):
-    string = _date_clean_re.sub(r'\1', string)
-    return datetime.strptime(string, '%B %d %Y')
-
-
-def set_filename_version(filename, version_number, pattern):
-    changed = []
-
-    def inject_version(match):
-        before, old, after = match.groups()
-        changed.append(True)
-        return before + version_number + after
-    with open(filename) as f:
-        contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')(?sm)" % pattern,
-                          inject_version, f.read())
-
-    if not changed:
-        fail('Could not find %s in %s', pattern, filename)
-
-    with open(filename, 'w') as f:
-        f.write(contents)
-
-
-def set_version(version):
-    info('Setting version to %s', version)
-    with open('logbook/__version__.py', 'w') as f:
-        f.write('__version__ = {!r}'.format(version))
-
-
-def fail(message, *args):
-    print >> sys.stderr, 'Error:', message % args
-    sys.exit(1)
-
-
-def info(message, *args):
-    print >> sys.stderr, message % args
-
-
-def get_git_tags():
-    return set(Popen(['git', 'tag'],
-                     stdout=PIPE).communicate()[0].splitlines())
-
-
-def git_is_clean():
-    return Popen(['git', 'diff', '--quiet']).wait() == 0
-
-
-def make_git_commit(message, *args):
-    message = message % args
-    Popen(['git', 'commit', '-am', message]).wait()
-
-
-def make_git_tag(tag):
-    info('Tagging "%s"', tag)
-    Popen(['git', 'tag', tag]).wait()
-
-
-parser = argparse.ArgumentParser("%prog [options]")
-parser.add_argument("--no-upload", dest="upload",
-                    action="store_false", default=True)
-
-
-def main():
-    args = parser.parse_args()
-
-    os.chdir(os.path.join(os.path.dirname(__file__), '..'))
-
-    rv = parse_changelog()
-    if rv is None:
-        fail('Could not parse changelog')
-
-    version, release_date, codename = rv
-    dev_version = bump_version(version) + '-dev'
-
-    info('Releasing %s (codename %s, release date %s)',
-         version, codename, release_date.strftime('%d/%m/%Y'))
-    tags = get_git_tags()
-
-    if version in tags:
-        fail('Version "%s" is already tagged', version)
-    if release_date.date() != date.today():
-        fail('Release date is not today (%s != %s)' %
-             (release_date.date(), date.today()))
-
-    if not git_is_clean():
-        fail('You have uncommitted changes in git')
-
-    set_version(version)
-    make_git_commit('Bump version number to %s', version)
-    make_git_tag(version)
-    set_version(dev_version)
-    make_git_commit('Bump version number to %s', dev_version)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/scripts/test_setup.py b/scripts/test_setup.py
deleted file mode 100644
index f7b62c2b..00000000
--- a/scripts/test_setup.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#! /usr/bin/python
-from pip._internal import main as pip_main
-import sys
-
-if __name__ == '__main__':
-    python_version = sys.version_info
-
-    deps = [
-        "execnet>=1.0.9",
-        "pytest",
-        "pyzmq",
-        "sqlalchemy",
-        "Jinja2",
-    ]
-
-    print("Setting up dependencies...")
-    result = pip_main(["install"] + deps)
-    sys.exit(result)
diff --git a/scripts/travis_build.py b/scripts/travis_build.py
deleted file mode 100644
index db3f794d..00000000
--- a/scripts/travis_build.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#! /usr/bin/python
-from __future__ import print_function
-import ast
-import os
-import subprocess
-import sys
-
-_PYPY = hasattr(sys, "pypy_version_info")
-
-if __name__ == '__main__':
-    use_cython = ast.literal_eval(os.environ["USE_CYTHON"])
-    if use_cython and _PYPY:
-        print("PyPy+Cython configuration skipped")
-    else:
-        sys.exit(
-            subprocess.call(
-                "make cybuild test" if use_cython else "make test", shell=True)
-        )
diff --git a/setup.cfg b/setup.cfg
index 60070cd2..a5e522fe 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,7 +1,11 @@
-[build_sphinx]
-source-dir = docs/
-build-dir = docs/_build
-all_files = 1
+[metadata]
+version = attr: logbook.__version__
 
-[upload_docs]
-upload-dir = docs/_build/html
+[options]
+packages = find:
+package_dir =
+    =src
+zip_safe = False
+
+[options.packages.find]
+where = src
diff --git a/setup.py b/setup.py
index 2f2a5e37..a4042d58 100644
--- a/setup.py
+++ b/setup.py
@@ -1,245 +1,36 @@
-r"""
-Logbook
--------
-
-An awesome logging implementation that is fun to use.
-
-Quickstart
-``````````
-
-::
-
-    from logbook import Logger
-    log = Logger('A Fancy Name')
-
-    log.warn('Logbook is too awesome for most applications')
-    log.error("Can't touch this")
-
-Works for web apps too
-``````````````````````
-
-::
-
-    from logbook import MailHandler, Processor
-
-    mailhandler = MailHandler(from_addr='servererror@example.com',
-                              recipients=['admin@example.com'],
-                              level='ERROR', format_string=u'''\
-    Subject: Application Error for {record.extra[path]} [{record.extra[method]}]
-
-    Message type:       {record.level_name}
-    Location:           {record.filename}:{record.lineno}
-    Module:             {record.module}
-    Function:           {record.func_name}
-    Time:               {record.time:%Y-%m-%d %H:%M:%S}
-    Remote IP:          {record.extra[ip]}
-    Request:            {record.extra[path]} [{record.extra[method]}]
-
-    Message:
-
-    {record.message}
-    ''')
-
-    def handle_request(request):
-        def inject_extra(record, handler):
-            record.extra['ip'] = request.remote_addr
-            record.extra['method'] = request.method
-            record.extra['path'] = request.path
-
-        with Processor(inject_extra):
-            with mailhandler:
-                # execute code that might fail in the context of the
-                # request.
-"""
-
 import os
 import platform
-import sys
-from itertools import chain
-
-from distutils.command.build_ext import build_ext
-from distutils.errors import (
-    CCompilerError, DistutilsExecError, DistutilsPlatformError)
-from setuptools import Distribution as _Distribution, Extension, setup
-from setuptools.command.test import test as TestCommand
-
-cmdclass = {}
-if sys.version_info < (2, 6):
-    raise Exception('Logbook requires Python 2.6 or higher.')
-
-cpython = platform.python_implementation() == 'CPython'
-
-ext_modules = [Extension('logbook._speedups', sources=['logbook/_speedups.c'])]
-
-ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
-if sys.platform == 'win32':
-    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
-    # find the compiler
-    ext_errors += (IOError,)
-
-
-class BuildFailed(Exception):
-    def __init__(self):
-        self.cause = sys.exc_info()[1]  # work around py 2/3 different syntax
-
-
-class ve_build_ext(build_ext):
-    """This class allows C extension building to fail."""
-    def run(self):
-        try:
-            build_ext.run(self)
-        except DistutilsPlatformError:
-            raise BuildFailed()
 
+from setuptools import Extension, setup
 
-    def build_extension(self, ext):
-        try:
-            build_ext.build_extension(self, ext)
-        except ext_errors:
-            raise BuildFailed()
-        except ValueError:
-            # this can happen on Windows 64 bit, see Python issue 7511
-            if "'path'" in str(sys.exc_info()[1]):  # works with both py 2/3
-                raise BuildFailed()
-            raise
-
-cmdclass['build_ext'] = ve_build_ext
-
-
-class Distribution(_Distribution):
-
-    def has_ext_modules(self):
-        # We want to always claim that we have ext_modules. This will be fine
-        # if we don't actually have them (such as on PyPy) because nothing
-        # will get built, however we don't want to provide an overally broad
-        # Wheel package when building a wheel without C support. This will
-        # ensure that Wheel knows to treat us as if the build output is
-        # platform specific.
- return True - - -class PyTest(TestCommand): - # from https://pytest.org/latest/goodpractises.html\ - # #integration-with-setuptools-test-commands - user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')] - - default_options = ['tests'] - - def initialize_options(self): - TestCommand.initialize_options(self) - self.pytest_args = '' - - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = [] - self.test_suite = True - - def run_tests(self): - # import here, cause outside the eggs aren't loaded - import pytest - errno = pytest.main( - ' '.join(self.default_options) + ' ' + self.pytest_args) - sys.exit(errno) - -cmdclass['test'] = PyTest +IS_CPYTHON = platform.python_implementation() == "CPython" +DISABLE_EXTENSION = bool(os.environ.get("DISABLE_LOGBOOK_CEXT")) def status_msgs(*msgs): - print('*' * 75) + print("*" * 75) for msg in msgs: print(msg) - print('*' * 75) - -version_file_path = os.path.join( - os.path.dirname(__file__), 'logbook', '__version__.py') + print("*" * 75) -with open(version_file_path) as version_file: - exec(version_file.read()) # pylint: disable=W0122 -extras_require = dict() -if sys.version_info[:2] < (3, 0): - extras_require['test'] = set(['pytest', 'pytest-cov<2.6']) -else: - extras_require['test'] = set(['pytest>4.0', 'pytest-cov>=2.6']) - -if sys.version_info[:2] < (3, 3): - extras_require['test'] |= set(['mock']) - -extras_require['dev'] = set(['cython']) | extras_require['test'] - -extras_require['execnet'] = set(['execnet>=1.0.9']) -extras_require['sqlalchemy'] = set(['sqlalchemy']) -extras_require['redis'] = set(['redis']) -extras_require['zmq'] = set(['pyzmq']) -extras_require['jinja'] = set(['Jinja2']) -extras_require['compression'] = set(['brotli']) - -extras_require['all'] = set(chain.from_iterable(extras_require.values())) - - -def run_setup(with_cext): - kwargs = {} - if with_cext: - kwargs['ext_modules'] = ext_modules - else: - kwargs['ext_modules'] = [] - - setup( - name='Logbook', - version=__version__, - license='BSD', - url='http://logbook.pocoo.org/', - author='Armin Ronacher, Georg Brandl', - author_email='armin.ronacher@active-4.com', - description='A logging replacement for Python', - long_description=__doc__, - packages=['logbook'], - zip_safe=False, - platforms='any', - cmdclass=cmdclass, - tests_require=['pytest'], - classifiers=[ - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - - ], - extras_require=extras_require, - distclass=Distribution, - **kwargs - ) - -if not cpython: - run_setup(False) +if not IS_CPYTHON: status_msgs( - 'WARNING: C extensions are not supported on ' + - 'this Python platform, speedups are not enabled.', - 'Plain-Python build succeeded.' + "WARNING: C extensions are not supported on this Python platform, " + "speedups are not enabled.", ) -elif os.environ.get('DISABLE_LOGBOOK_CEXT'): - run_setup(False) + ext_modules = [] +elif DISABLE_EXTENSION: status_msgs( - 'DISABLE_LOGBOOK_CEXT is set; ' + - 'not attempting to build C extensions.', - 'Plain-Python build succeeded.' + "DISABLE_LOGBOOK_CEXT is set; not attempting to build C extensions.", ) + ext_modules = [] else: - try: - run_setup(True) - except BuildFailed as exc: - status_msgs( - exc.cause, - 'WARNING: The C extension could not be compiled, ' + - 'speedups are not enabled.', - 'Failure information, if any, is above.', - 'Retrying the build without the C extension now.' 
- ) + from Cython.Build import cythonize - run_setup(False) + ext_modules = cythonize( + [Extension("logbook._speedups", sources=["src/cython/speedups.pyx"])], + language_level=3, + ) - status_msgs( - 'WARNING: The C extension could not be compiled, ' + - 'speedups are not enabled.', - 'Plain-Python build succeeded.' - ) +setup(ext_modules=ext_modules) diff --git a/logbook/_speedups.pyx b/src/cython/speedups.pyx similarity index 94% rename from logbook/_speedups.pyx rename to src/cython/speedups.pyx index 8e1d2162..a4bb55a9 100644 --- a/logbook/_speedups.pyx +++ b/src/cython/speedups.pyx @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# cython: language_level=2 """ logbook._speedups ~~~~~~~~~~~~~~~~~ @@ -11,14 +9,27 @@ """ -from logbook.concurrency import (is_gevent_enabled, thread_get_ident, greenlet_get_ident, thread_local, - GreenletRLock, greenlet_local, ContextVar, context_get_ident, is_context_enabled) +from logbook.concurrency import ( + ContextVar, + GreenletRLock, + context_get_ident, + greenlet_get_ident, + greenlet_local, + is_context_enabled, + is_gevent_enabled, + thread_get_ident, + thread_local, +) from cpython.dict cimport PyDict_Clear, PyDict_SetItem -from cpython.list cimport PyList_Append, PyList_Sort, PyList_GET_SIZE - -from cpython.pythread cimport PyThread_type_lock, PyThread_allocate_lock, \ - PyThread_release_lock, PyThread_acquire_lock, WAIT_LOCK +from cpython.list cimport PyList_Append, PyList_GET_SIZE, PyList_Sort +from cpython.pythread cimport ( + WAIT_LOCK, + PyThread_acquire_lock, + PyThread_allocate_lock, + PyThread_release_lock, + PyThread_type_lock, +) _missing = object() @@ -51,7 +62,7 @@ cdef class group_reflected_property: def __set__(self, obj, value): setattr(obj, self._name, value) - def __del__(self, obj): + def __delete__(self, obj): delattr(obj, self._name) @@ -198,10 +209,10 @@ cdef class ContextStackManager: use_gevent = is_gevent_enabled() use_context = is_context_enabled() - if use_gevent: - tid = greenlet_get_ident() - elif use_context: + if use_context: tid = context_get_ident() + elif use_gevent: + tid = greenlet_get_ident() else: tid = thread_get_ident() diff --git a/logbook/__init__.py b/src/logbook/__init__.py similarity index 52% rename from logbook/__init__.py rename to src/logbook/__init__.py index a8ffc817..abccb331 100644 --- a/logbook/__init__.py +++ b/src/logbook/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook ~~~~~~~ @@ -11,23 +10,58 @@ """ import os + from .base import ( - LogRecord, Logger, LoggerGroup, NestedSetup, Processor, Flags, - get_level_name, lookup_level, dispatch_record, CRITICAL, ERROR, WARNING, - NOTICE, INFO, DEBUG, TRACE, NOTSET, set_datetime_format) + CRITICAL, + DEBUG, + ERROR, + INFO, + NOTICE, + NOTSET, + TRACE, + WARNING, + Flags, + Logger, + LoggerGroup, + LogRecord, + NestedSetup, + Processor, + dispatch_record, + get_level_name, + lookup_level, + set_datetime_format, +) from .handlers import ( - Handler, StreamHandler, FileHandler, MonitoringFileHandler, StderrHandler, - RotatingFileHandler, TimedRotatingFileHandler, TestHandler, MailHandler, - GMailHandler, SyslogHandler, NullHandler, NTEventLogHandler, - create_syshandler, StringFormatter, StringFormatterHandlerMixin, - HashingHandlerMixin, LimitingHandlerMixin, WrapperHandler, - FingersCrossedHandler, GroupHandler, GZIPCompressionHandler, BrotliCompressionHandler) -from . 
import compat + BrotliCompressionHandler, + FileHandler, + FingersCrossedHandler, + GMailHandler, + GroupHandler, + GZIPCompressionHandler, + Handler, + HashingHandlerMixin, + LimitingHandlerMixin, + MailHandler, + MonitoringFileHandler, + NTEventLogHandler, + NullHandler, + RotatingFileHandler, + StderrHandler, + StreamHandler, + StringFormatter, + StringFormatterHandlerMixin, + SyslogHandler, + TestHandler, + TimedRotatingFileHandler, + WrapperHandler, + create_syshandler, +) +from . import compat # isort: skip # create an anonymous default logger and provide all important # methods of that logger as global functions -_default_logger = Logger('Generic') +_default_logger = Logger("Generic") _default_logger.suppress_dispatcher = True trace = _default_logger.trace debug = _default_logger.debug @@ -44,7 +78,7 @@ # install a default global handler -if os.environ.get('LOGBOOK_INSTALL_DEFAULT_HANDLER'): +if os.environ.get("LOGBOOK_INSTALL_DEFAULT_HANDLER"): default_handler = StderrHandler() default_handler.push_application() diff --git a/src/logbook/__version__.py b/src/logbook/__version__.py new file mode 100644 index 00000000..e4adfb83 --- /dev/null +++ b/src/logbook/__version__.py @@ -0,0 +1 @@ +__version__ = "1.6.0" diff --git a/logbook/_fallback.py b/src/logbook/_fallback.py similarity index 86% rename from logbook/_fallback.py rename to src/logbook/_fallback.py index 10fed234..c011f2b3 100644 --- a/logbook/_fallback.py +++ b/src/logbook/_fallback.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook._fallback ~~~~~~~~~~~~~~~~~ @@ -9,11 +8,20 @@ :license: BSD, see LICENSE for more details. """ from itertools import count -from logbook.helpers import get_iterator_next_method + from logbook.concurrency import ( - thread_get_ident, greenlet_get_ident, thread_local, greenlet_local, - ThreadLock, GreenletRLock, is_gevent_enabled, ContextVar, context_get_ident, - is_context_enabled) + ContextVar, + GreenletRLock, + ThreadLock, + context_get_ident, + greenlet_get_ident, + greenlet_local, + is_context_enabled, + is_gevent_enabled, + thread_get_ident, + thread_local, +) +from logbook.helpers import get_iterator_next_method _missing = object() _MAX_CONTEXT_OBJECT_CACHE = 256 @@ -24,8 +32,9 @@ def group_reflected_property(name, default, fallback=_missing): value of the group if set. If there is no such group, the provided default is used. """ + def _get(self): - rv = getattr(self, '_' + name, _missing) + rv = getattr(self, "_" + name, _missing) if rv is not _missing and rv != fallback: return rv if self.group is None: @@ -33,15 +42,15 @@ def _get(self): return getattr(self.group, name) def _set(self, value): - setattr(self, '_' + name, value) + setattr(self, "_" + name, value) def _del(self): - delattr(self, '_' + name) - return property(_get, _set, _del) + delattr(self, "_" + name) + return property(_get, _set, _del) -class _StackBound(object): +class _StackBound: def __init__(self, obj, push, pop): self.__obj = obj self.__push = push @@ -55,7 +64,7 @@ def __exit__(self, exc_type, exc_value, tb): self.__pop() -class StackedObject(object): +class StackedObject: """Baseclass for all objects that provide stack manipulation operations. """ @@ -131,7 +140,7 @@ def applicationbound(self, _cls=_StackBound): return _cls(self, self.push_application, self.pop_application) -class ContextStackManager(object): +class ContextStackManager: """Helper class for context objects that manages a stack of objects. 
""" @@ -142,7 +151,7 @@ def __init__(self): self._thread_context = thread_local() self._greenlet_context_lock = GreenletRLock() self._greenlet_context = greenlet_local() - self._context_stack = ContextVar('stack') + self._context_stack = ContextVar("stack") self._cache = {} self._stackop = get_iterator_next_method(count()) @@ -153,10 +162,10 @@ def iter_context_objects(self): use_gevent = is_gevent_enabled() use_context = is_context_enabled() - if use_gevent: - tid = greenlet_get_ident() - elif use_context: + if use_context: tid = context_get_ident() + elif use_gevent: + tid = greenlet_get_ident() else: tid = thread_get_ident() @@ -165,10 +174,10 @@ def iter_context_objects(self): if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE: self._cache.clear() objects = self._global[:] - objects.extend(getattr(self._thread_context, 'stack', ())) + objects.extend(getattr(self._thread_context, "stack", ())) if use_gevent: - objects.extend(getattr(self._greenlet_context, 'stack', ())) + objects.extend(getattr(self._greenlet_context, "stack", ())) if use_context: objects.extend(self._context_stack.get([])) @@ -184,7 +193,7 @@ def push_greenlet(self, obj): # remote chance to conflict with thread ids self._cache.pop(greenlet_get_ident(), None) item = (self._stackop(), obj) - stack = getattr(self._greenlet_context, 'stack', None) + stack = getattr(self._greenlet_context, "stack", None) if stack is None: self._greenlet_context.stack = [item] else: @@ -197,8 +206,8 @@ def pop_greenlet(self): try: # remote chance to conflict with thread ids self._cache.pop(greenlet_get_ident(), None) - stack = getattr(self._greenlet_context, 'stack', None) - assert stack, 'no objects on stack' + stack = getattr(self._greenlet_context, "stack", None) + assert stack, "no objects on stack" return stack.pop()[1] finally: self._greenlet_context_lock.release() @@ -216,7 +225,7 @@ def push_context(self, obj): def pop_context(self): self._cache.pop(context_get_ident(), None) stack = self._context_stack.get(None) - assert stack, 'no objects on stack' + assert stack, "no objects on stack" return stack.pop()[1] def push_thread(self, obj): @@ -224,7 +233,7 @@ def push_thread(self, obj): try: self._cache.pop(thread_get_ident(), None) item = (self._stackop(), obj) - stack = getattr(self._thread_context, 'stack', None) + stack = getattr(self._thread_context, "stack", None) if stack is None: self._thread_context.stack = [item] else: @@ -236,8 +245,8 @@ def pop_thread(self): self._thread_context_lock.acquire() try: self._cache.pop(thread_get_ident(), None) - stack = getattr(self._thread_context, 'stack', None) - assert stack, 'no objects on stack' + stack = getattr(self._thread_context, "stack", None) + assert stack, "no objects on stack" return stack.pop()[1] finally: self._thread_context_lock.release() @@ -247,7 +256,7 @@ def push_application(self, obj): self._cache.clear() def pop_application(self): - assert self._global, 'no objects on application stack' + assert self._global, "no objects on application stack" popped = self._global.pop()[1] self._cache.clear() return popped diff --git a/logbook/_termcolors.py b/src/logbook/_termcolors.py similarity index 65% rename from logbook/_termcolors.py rename to src/logbook/_termcolors.py index 0c42b3e7..4a35383a 100644 --- a/logbook/_termcolors.py +++ b/src/logbook/_termcolors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook._termcolors ~~~~~~~~~~~~~~~~~~~ @@ -13,10 +12,26 @@ codes = {"": "", "reset": esc + "39;49;00m"} -dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue", - 
"purple", "teal", "lightgray"] -light_colors = ["darkgray", "red", "green", "yellow", "blue", - "fuchsia", "turquoise", "white"] +dark_colors = [ + "black", + "darkred", + "darkgreen", + "brown", + "darkblue", + "purple", + "teal", + "lightgray", +] +light_colors = [ + "darkgray", + "red", + "green", + "yellow", + "blue", + "fuchsia", + "turquoise", + "white", +] x = 30 for d, l in zip(dark_colors, light_colors): @@ -35,10 +50,11 @@ def _str_to_type(obj, strtype): """Helper for ansiformat and colorize""" if isinstance(obj, type(strtype)): return obj - return obj.encode('ascii') + return obj.encode("ascii") def colorize(color_key, text): """Returns an ANSI formatted text with the given color.""" - return (_str_to_type(codes[color_key], text) + text + - _str_to_type(codes["reset"], text)) + return ( + _str_to_type(codes[color_key], text) + text + _str_to_type(codes["reset"], text) + ) diff --git a/logbook/base.py b/src/logbook/base.py similarity index 83% rename from logbook/base.py rename to src/logbook/base.py index 15fa831c..f9fb4258 100644 --- a/logbook/base.py +++ b/src/logbook/base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.base ~~~~~~~~~~~~ @@ -16,25 +15,29 @@ from itertools import chain from weakref import ref as weakref -from logbook.concurrency import (greenlet_get_ident, thread_get_ident, - thread_get_name) - -from logbook.helpers import (PY2, cached_property, integer_types, iteritems, - parse_iso8601, string_types, to_safe_json, u, - xrange) +from logbook.concurrency import greenlet_get_ident, thread_get_ident, thread_get_name +from logbook.helpers import cached_property, parse_iso8601, to_safe_json _has_speedups = False try: - if os.environ.get('DISABLE_LOGBOOK_CEXT_AT_RUNTIME'): + if os.environ.get("DISABLE_LOGBOOK_CEXT_AT_RUNTIME"): raise ImportError("Speedups disabled via DISABLE_LOGBOOK_CEXT_AT_RUNTIME") from logbook._speedups import ( - _missing, group_reflected_property, ContextStackManager, StackedObject) + ContextStackManager, + StackedObject, + _missing, + group_reflected_property, + ) _has_speedups = True except ImportError: from logbook._fallback import ( - _missing, group_reflected_property, ContextStackManager, StackedObject) + ContextStackManager, + StackedObject, + _missing, + group_reflected_property, + ) _datetime_factory = datetime.utcnow @@ -60,8 +63,7 @@ def set_datetime_format(datetime_format): :py:obj:`datetime_format` (possibly time zone aware) This function defaults to creating datetime objects in UTC time, - using `datetime.utcnow() - `_, + using :func:`datetime.utcnow`, so that logbook logs all times in UTC time by default. This is recommended in case you have multiple software modules or instances running in different servers in different time zones, as @@ -79,7 +81,7 @@ def set_datetime_format(datetime_format): logbook.set_datetime_format("local") Other uses rely on your supplied :py:obj:`datetime_format`. - Using `pytz `_ for example:: + Using `pytz `_ for example:: from datetime import datetime import logbook @@ -98,13 +100,18 @@ def utc_tz(): elif callable(datetime_format): inst = datetime_format() if not isinstance(inst, datetime): - raise ValueError("Invalid callable value, valid callable " - "should return datetime.datetime instances, " - "not %r" % (type(inst),)) + raise ValueError( + "Invalid callable value, valid callable " + "should return datetime.datetime instances, " + "not %r" % (type(inst),) + ) _datetime_factory = datetime_format else: - raise ValueError("Invalid value %r. Valid values are 'utc' and " - "'local'." 
% (datetime_format,)) + raise ValueError( + "Invalid value %r. Valid values are 'utc' and " + "'local'." % (datetime_format,) + ) + # make sure to sync these up with _speedups.pyx CRITICAL = 15 @@ -117,32 +124,19 @@ def utc_tz(): NOTSET = 0 _level_names = { - CRITICAL: 'CRITICAL', - ERROR: 'ERROR', - WARNING: 'WARNING', - NOTICE: 'NOTICE', - INFO: 'INFO', - DEBUG: 'DEBUG', - TRACE: 'TRACE', - NOTSET: 'NOTSET' + CRITICAL: "CRITICAL", + ERROR: "ERROR", + WARNING: "WARNING", + NOTICE: "NOTICE", + INFO: "INFO", + DEBUG: "DEBUG", + TRACE: "TRACE", + NOTSET: "NOTSET", } -_reverse_level_names = dict((v, k) for (k, v) in iteritems(_level_names)) +_reverse_level_names = {v: k for (k, v) in _level_names.items()} _missing = object() -# on python 3 we can savely assume that frame filenames will be in -# unicode, on Python 2 we have to apply a trick. -if PY2: - def _convert_frame_filename(fn): - if isinstance(fn, unicode): - fn = fn.decode(sys.getfilesystemencoding() or 'utf-8', - 'replace') - return fn -else: - def _convert_frame_filename(fn): - return fn - - def level_name_property(): """Returns a property that reflects the level as name from the internal level attribute. @@ -153,18 +147,18 @@ def _get_level_name(self): def _set_level_name(self, level): self.level = lookup_level(level) - return property(_get_level_name, _set_level_name, - doc='The level as unicode string') + + return property(_get_level_name, _set_level_name, doc="The level as unicode string") def lookup_level(level): """Return the integer representation of a logging level.""" - if isinstance(level, integer_types): + if isinstance(level, int): return level try: return _reverse_level_names[level] except KeyError: - raise LookupError('unknown level name %s' % level) + raise LookupError("unknown level name %s" % level) def get_level_name(level): @@ -172,10 +166,10 @@ def get_level_name(level): try: return _level_names[level] except KeyError: - raise LookupError('unknown level') + raise LookupError("unknown level") -class _ExceptionCatcher(object): +class _ExceptionCatcher: """Helper for exception caught blocks.""" def __init__(self, logger, args, kwargs): @@ -189,7 +183,7 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, tb): if exc_type is not None: kwargs = self.kwargs.copy() - kwargs['exc_info'] = (exc_type, exc_value, tb) + kwargs["exc_info"] = (exc_type, exc_value, tb) self.logger.exception(*self.args, **kwargs) return True @@ -210,7 +204,7 @@ def push_greenlet(self): def pop_greenlet(self): """Pops the context object from the stack.""" popped = self.stack_manager.pop_greenlet() - assert popped is self, 'popped unexpected object' + assert popped is self, "popped unexpected object" def push_context(self): """Pushes the context object to the context stack.""" @@ -219,7 +213,7 @@ def push_context(self): def pop_context(self): """Pops the context object from the stack.""" popped = self.stack_manager.pop_context() - assert popped is self, 'popped unexpected object' + assert popped is self, "popped unexpected object" def push_thread(self): """Pushes the context object to the thread stack.""" @@ -228,7 +222,7 @@ def push_thread(self): def pop_thread(self): """Pops the context object from the stack.""" popped = self.stack_manager.pop_thread() - assert popped is self, 'popped unexpected object' + assert popped is self, "popped unexpected object" def push_application(self): """Pushes the context object to the application stack.""" @@ -237,7 +231,7 @@ def push_application(self): def pop_application(self): """Pops the context 
object from the stack."""
         popped = self.stack_manager.pop_application()
-        assert popped is self, 'popped unexpected object'
+        assert popped is self, "popped unexpected object"
 
 
 class NestedSetup(StackedObject):
@@ -306,14 +300,16 @@ def process(self, record):
         self.callback(record)
 
 
-class _InheritedType(object):
+class _InheritedType:
     __slots__ = ()
 
     def __repr__(self):
-        return 'Inherit'
+        return "Inherit"
 
     def __reduce__(self):
-        return 'Inherit'
+        return "Inherit"
+
+
 Inherit = _InheritedType()
@@ -346,6 +342,7 @@ class Flags(ContextObject):
         with Flags(errors='silent'):
             ...
     """
+
     stack_manager = ContextStackManager()
 
     def __init__(self, **flags):
@@ -368,19 +365,31 @@ def _create_log_record(cls, dict):
     return cls.from_dict(dict)
 
 
-class LogRecord(object):
+class LogRecord:
     """A LogRecord instance represents an event being logged.  LogRecord
     instances are created every time something is logged.  They contain
     all the information pertinent to the event being logged.  The main
     information passed in is in msg and args
     """
-    _pullable_information = frozenset((
-        'func_name', 'module', 'filename', 'lineno', 'process_name', 'thread',
-        'thread_name', 'greenlet', 'formatted_exception', 'message',
-        'exception_name', 'exception_message'
-    ))
-    _noned_on_close = frozenset(('exc_info', 'frame', 'calling_frame'))
+
+    _pullable_information = frozenset(
+        (
+            "func_name",
+            "module",
+            "filename",
+            "lineno",
+            "process_name",
+            "thread",
+            "thread_name",
+            "greenlet",
+            "formatted_exception",
+            "message",
+            "exception_name",
+            "exception_message",
+        )
+    )
+    _noned_on_close = frozenset(("exc_info", "frame", "calling_frame"))
 
     #: can be overridden by a handler to not close the record.  This could
     #: lead to memory leaks so it should be used carefully.
@@ -402,9 +411,19 @@ class LogRecord(object):
     #: information that becomes unavailable on close.
     information_pulled = False
 
-    def __init__(self, channel, level, msg, args=None, kwargs=None,
-                 exc_info=None, extra=None, frame=None, dispatcher=None,
-                 frame_correction=0):
+    def __init__(
+        self,
+        channel,
+        level,
+        msg,
+        args=None,
+        kwargs=None,
+        exc_info=None,
+        extra=None,
+        frame=None,
+        dispatcher=None,
+        frame_correction=0,
+    ):
         #: the name of the logger that created it or any other textual
         #: channel description.  This is a descriptive name and can be
         #: used for filtering.
@@ -431,8 +450,7 @@ def __init__(self, channel, level, msg, args=None, kwargs=None,
         #: where custom log processors can attach custom context sensitive
         #: data.
-        # TODO: Replace the lambda with str when we remove support for python 2
-        self.extra = defaultdict(lambda: u'', extra or ())
+        self.extra = defaultdict(str, extra or ())
 
         #: If available, optionally the interpreter frame that pulled the
         #: heavy init.  This usually points to somewhere in the dispatcher.
#: Might not be available for all calls and is removed when the log @@ -460,14 +478,20 @@ def heavy_init(self): """ if self.heavy_initialized: return - assert not self.late, 'heavy init is no longer possible' + assert not self.late, "heavy init is no longer possible" self.heavy_initialized = True self.process = os.getpid() self.time = _datetime_factory() - if self.frame is None and Flags.get_flag('introspection', True): + if self.frame is None and Flags.get_flag("introspection", True): self.frame = sys._getframe(1) if self.exc_info is True: self.exc_info = sys.exc_info() + if isinstance(self.exc_info, BaseException): + self.exc_info = ( + type(self.exc_info), + self.exc_info, + self.exc_info.__traceback__, + ) def pull_information(self): """A helper function that pulls all frame-related information into @@ -504,11 +528,11 @@ def to_dict(self, json_safe=False): """ self.pull_information() rv = {} - for key, value in iteritems(self.__dict__): - if key[:1] != '_' and key not in self._noned_on_close: + for key, value in self.__dict__.items(): + if key[:1] != "_" and key not in self._noned_on_close: rv[key] = value # the extra dict is exported as regular dict - rv['extra'] = dict(rv['extra']) + rv["extra"] = dict(rv["extra"]) if json_safe: return to_safe_json(rv) return rv @@ -531,11 +555,10 @@ def update_from_dict(self, d): setattr(self, key, None) self._information_pulled = True self._channel = None - if isinstance(self.time, string_types): + if isinstance(self.time, str): self.time = parse_iso8601(self.time) - # TODO: Replace the lambda with str when we remove support for python 2` - self.extra = defaultdict(lambda: u'', self.extra) + self.extra = defaultdict(str, self.extra) return self def _format_message(self, msg, *args, **kwargs): @@ -551,24 +574,24 @@ def message(self): return self.msg try: try: - return self._format_message(self.msg, *self.args, - **self.kwargs) + return self._format_message(self.msg, *self.args, **self.kwargs) except UnicodeDecodeError: # Assume an unicode message but mixed-up args - msg = self.msg.encode('utf-8', 'replace') + msg = self.msg.encode("utf-8", "replace") return self._format_message(msg, *self.args, **self.kwargs) except (UnicodeEncodeError, AttributeError): # we catch AttributeError since if msg is bytes, # it won't have the 'format' method - if (sys.exc_info()[0] is AttributeError - and (PY2 or not isinstance(self.msg, bytes))): + if sys.exc_info()[0] is AttributeError and not isinstance( + self.msg, bytes + ): # this is not the case we thought it is... raise # Assume encoded message with unicode args. # The assumption of utf8 as input encoding is just a guess, # but this codepath is unlikely (if the message is a constant # string in the caller's source file) - msg = self.msg.decode('utf-8', 'replace') + msg = self.msg.decode("utf-8", "replace") return self._format_message(msg, *self.args, **self.kwargs) except Exception: @@ -577,16 +600,19 @@ def message(self): # access to the frame. But there is not much we can do about # that. 
e = sys.exc_info()[1] - errormsg = ('Could not format message with provided ' - 'arguments: {err}\n msg={msg!r}\n ' - 'args={args!r} \n kwargs={kwargs!r}.\n' - 'Happened in file {file}, line {lineno}').format( - err=e, msg=self.msg, args=self.args, - kwargs=self.kwargs, file=self.filename, - lineno=self.lineno + errormsg = ( + "Could not format message with provided " + "arguments: {err}\n msg={msg!r}\n " + "args={args!r} \n kwargs={kwargs!r}.\n" + "Happened in file {file}, line {lineno}" + ).format( + err=e, + msg=self.msg, + args=self.args, + kwargs=self.kwargs, + file=self.filename, + lineno=self.lineno, ) - if PY2: - errormsg = errormsg.encode('utf-8') raise TypeError(errormsg) level_name = level_name_property() @@ -601,7 +627,7 @@ def calling_frame(self): while frm is not None and frm.f_globals is globs: frm = frm.f_back - for _ in xrange(self.frame_correction): + for _ in range(self.frame_correction): if frm is None: break @@ -627,7 +653,7 @@ def module(self): """ cf = self.calling_frame if cf is not None: - return cf.f_globals.get('__name__') + return cf.f_globals.get("__name__") @cached_property def filename(self): @@ -637,9 +663,9 @@ def filename(self): cf = self.calling_frame if cf is not None: fn = cf.f_code.co_filename - if fn[:1] == '<' and fn[-1:] == '>': + if fn[:1] == "<" and fn[-1:] == ">": return fn - return _convert_frame_filename(os.path.abspath(fn)) + return os.path.abspath(fn) @cached_property def lineno(self): @@ -681,7 +707,7 @@ def process_name(self): # yet - e.g. if a custom import hook causes third-party code # to run when multiprocessing calls import. See issue 8200 # for an example - mp = sys.modules.get('multiprocessing') + mp = sys.modules.get("multiprocessing") if mp is not None: # pragma: no cover try: return mp.current_process().name @@ -694,9 +720,7 @@ def formatted_exception(self): in case there was any. """ if self.exc_info is not None and self.exc_info != (None, None, None): - rv = ''.join(traceback.format_exception(*self.exc_info)) - if PY2: - rv = rv.decode('utf-8', 'replace') + rv = "".join(traceback.format_exception(*self.exc_info)) return rv.rstrip() @cached_property @@ -704,25 +728,19 @@ def exception_name(self): """The name of the exception.""" if self.exc_info is not None: cls = self.exc_info[0] - return u(cls.__module__ + '.' + cls.__name__) + return cls.__module__ + "." + cls.__name__ @property def exception_shortname(self): """An abbreviated exception name (no import path)""" - return self.exception_name.rsplit('.')[-1] + return self.exception_name.rsplit(".")[-1] @cached_property def exception_message(self): """The message of the exception.""" if self.exc_info is not None: val = self.exc_info[1] - try: - if PY2: - return unicode(val) - else: - return str(val) - except UnicodeError: - return str(val).decode('utf-8', 'replace') + return str(val) @property def dispatcher(self): @@ -736,7 +754,7 @@ def dispatcher(self): return self._dispatcher() -class LoggerMixin(object): +class LoggerMixin: """This mixin class defines and implements the "usual" logger interface (i.e. the descriptive logging functions). 
@@ -802,11 +820,11 @@ def exception(self, *args, **kwargs):
         if self.disabled or ERROR < self.level:
             return
         if not args:
-            args = ('Uncaught exception occurred',)
-        if 'exc_info' not in kwargs:
+            args = ("Uncaught exception occurred",)
+        if "exc_info" not in kwargs:
             exc_info = sys.exc_info()
-            assert exc_info[0] is not None, 'no exception occurred'
-            kwargs.setdefault('exc_info', sys.exc_info())
+            assert exc_info[0] is not None, "no exception occurred"
+            kwargs.setdefault("exc_info", sys.exc_info())
         return self.error(*args, **kwargs)
 
     def critical(self, *args, **kwargs):
@@ -837,7 +855,7 @@ def catch_exceptions(self, *args, **kwargs):
                 execute_code_that_might_fail()
         """
         if not args:
-            args = ('Uncaught exception occurred',)
+            args = ("Uncaught exception occurred",)
         return _ExceptionCatcher(self, args, kwargs)
 
     def enable(self):
@@ -851,7 +869,7 @@ def enable(self):
         try:
             self.disabled = False
         except AttributeError:
-            raise AttributeError('The disabled property is read-only.')
+            raise AttributeError("The disabled property is read-only.")
 
     def disable(self):
         """Convenience method to disable this logger.
@@ -864,17 +882,18 @@ def disable(self):
         try:
             self.disabled = True
         except AttributeError:
-            raise AttributeError('The disabled property is read-only.')
+            raise AttributeError("The disabled property is read-only.")
 
     def _log(self, level, args, kwargs):
-        exc_info = kwargs.pop('exc_info', None)
-        extra = kwargs.pop('extra', None)
-        frame_correction = kwargs.pop('frame_correction', 0)
-        self.make_record_and_handle(level, args[0], args[1:], kwargs,
-                                    exc_info, extra, frame_correction)
+        exc_info = kwargs.pop("exc_info", None)
+        extra = kwargs.pop("extra", None)
+        frame_correction = kwargs.pop("frame_correction", 0)
+        self.make_record_and_handle(
+            level, args[0], args[1:], kwargs, exc_info, extra, frame_correction
+        )
 
 
-class RecordDispatcher(object):
+class RecordDispatcher:
     """A record dispatcher is the internal base class that implements
     the logic used by the :class:`~logbook.Logger`.
     """
@@ -893,8 +912,8 @@ def __init__(self, name=None, level=NOTSET):
         #: the level of the record dispatcher as integer
         self.level = level
 
-    disabled = group_reflected_property('disabled', False)
-    level = group_reflected_property('level', NOTSET, fallback=NOTSET)
+    disabled = group_reflected_property("disabled", False)
+    level = group_reflected_property("level", NOTSET, fallback=NOTSET)
 
     def handle(self, record):
         """Call the handlers for the specified record.  This is
@@ -907,8 +926,9 @@ def handle(self, record):
         if not self.disabled and record.level >= self.level:
             self.call_handlers(record)
 
-    def make_record_and_handle(self, level, msg, args, kwargs, exc_info,
-                               extra, frame_correction):
+    def make_record_and_handle(
+        self, level, msg, args, kwargs, exc_info, extra, frame_correction
+    ):
         """Creates a record from some given arguments and heads it over
         to the handling system.
         """
@@ -921,8 +941,18 @@ def make_record_and_handle(self, level, msg, args, kwargs, exc_info,
         if not self.suppress_dispatcher:
             channel = self
 
-        record = LogRecord(self.name, level, msg, args, kwargs, exc_info,
-                           extra, None, channel, frame_correction)
+        record = LogRecord(
+            self.name,
+            level,
+            msg,
+            args,
+            kwargs,
+            exc_info,
+            extra,
+            None,
+            channel,
+            frame_correction,
+        )
 
         # after handling the log record is closed which will remove some
         # references that would require a GC run on cpython.
This includes @@ -957,8 +987,9 @@ def call_handlers(self, record): # Both logger attached handlers as well as context specific # handlers are handled one after another. The latter also # include global handlers. - for handler in chain(self.handlers, - Handler.stack_manager.iter_context_objects()): + for handler in chain( + self.handlers, Handler.stack_manager.iter_context_objects() + ): # skip records that this handler is not interested in based # on the record and handler level or in case this method was # overridden on some custom logic. @@ -985,8 +1016,7 @@ def call_handlers(self, record): # record. The impact is that filters are slower than the # handler's should_handle function in case there is no default # handler that would handle the record (delayed init). - if (handler.filter is not None - and not handler.filter(record, handler)): + if handler.filter is not None and not handler.filter(record, handler): continue # We might have a filter, so now that we know we *should* handle @@ -1026,7 +1056,7 @@ class Logger(RecordDispatcher, LoggerMixin): """ -class LoggerGroup(object): +class LoggerGroup: """A LoggerGroup represents a group of loggers. It cannot emit log messages on its own but it can be used to set the disabled flag and log level of all loggers in the group. @@ -1057,7 +1087,7 @@ def __init__(self, loggers=None, level=NOTSET, processor=None): def add_logger(self, logger): """Adds a logger to this group.""" - assert logger.group is None, 'Logger already belongs to a group' + assert logger.group is None, "Logger already belongs to a group" logger.group = self self.loggers.append(logger) @@ -1088,7 +1118,7 @@ def enable(self, force=False): self.disabled = False if force: for logger in self.loggers: - rv = getattr(logger, '_disabled', _missing) + rv = getattr(logger, "_disabled", _missing) if rv is not _missing: logger.enable() @@ -1106,7 +1136,7 @@ def disable(self, force=False): self.disabled = True if force: for logger in self.loggers: - rv = getattr(logger, '_disabled', _missing) + rv = getattr(logger, "_disabled", _missing) if rv is not _missing: logger.disable() @@ -1121,5 +1151,6 @@ def dispatch_record(record): """ _default_dispatcher.call_handlers(record) + # at that point we are safe to import handler -from logbook.handlers import Handler # isort:skip +from logbook.handlers import Handler # isort:skip diff --git a/logbook/compat.py b/src/logbook/compat.py similarity index 74% rename from logbook/compat.py rename to src/logbook/compat.py index 602a11af..c79fef11 100644 --- a/logbook/compat.py +++ b/src/logbook/compat.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.compat ~~~~~~~~~~~~~~ @@ -9,16 +8,13 @@ :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. :license: BSD, see LICENSE for more details. """ -import collections import logging import sys import warnings -from datetime import date, datetime +from collections.abc import Mapping +from datetime import date, datetime, timezone import logbook -from logbook.helpers import u, string_types, iteritems, collections_abc - -_epoch_ord = date(1970, 1, 1).toordinal() def redirect_logging(set_root_logger_level=True): @@ -36,7 +32,7 @@ def redirect_logging(set_root_logger_level=True): logging.root.setLevel(logging.DEBUG) -class redirected_logging(object): +class redirected_logging: """Temporarily redirects logging for all threads and reverts it later to the old handlers. Mainly used by the internal unittests:: @@ -45,6 +41,7 @@ class redirected_logging(object): with redirected_logging(): ... 
""" + def __init__(self, set_root_logger_level=True): self.old_handlers = logging.root.handlers[:] self.old_level = logging.root.level @@ -62,7 +59,6 @@ def end(self, etype=None, evalue=None, tb=None): class LoggingCompatRecord(logbook.LogRecord): - def _format_message(self, msg, *args, **kwargs): if kwargs: assert not args @@ -102,11 +98,28 @@ def find_extra(self, old_record): extra dictionaries. """ rv = vars(old_record).copy() - for key in ('name', 'msg', 'args', 'levelname', 'levelno', - 'pathname', 'filename', 'module', 'exc_info', - 'exc_text', 'lineno', 'funcName', 'created', - 'msecs', 'relativeCreated', 'thread', 'threadName', - 'greenlet', 'processName', 'process'): + for key in ( + "name", + "msg", + "args", + "levelname", + "levelno", + "pathname", + "filename", + "module", + "exc_info", + "exc_text", + "lineno", + "funcName", + "created", + "msecs", + "relativeCreated", + "thread", + "threadName", + "greenlet", + "processName", + "process", + ): rv.pop(key, None) return rv @@ -114,9 +127,11 @@ def find_caller(self, old_record): """Tries to find the caller that issued the call.""" frm = sys._getframe(2) while frm is not None: - if (frm.f_globals is globals() or - frm.f_globals is logbook.base.__dict__ or - frm.f_globals is logging.__dict__): + if ( + frm.f_globals is globals() + or frm.f_globals is logbook.base.__dict__ + or frm.f_globals is logging.__dict__ + ): frm = frm.f_back else: return frm @@ -133,15 +148,19 @@ def convert_record(self, old_record): kwargs = None # Logging allows passing a mapping object, in which case args will be a mapping. - if isinstance(args, collections_abc.Mapping): + if isinstance(args, Mapping): kwargs = args args = None - record = LoggingCompatRecord(old_record.name, - self.convert_level(old_record.levelno), - old_record.msg, args, - kwargs, old_record.exc_info, - self.find_extra(old_record), - self.find_caller(old_record)) + record = LoggingCompatRecord( + old_record.name, + self.convert_level(old_record.levelno), + old_record.msg, + args, + kwargs, + old_record.exc_info, + self.find_extra(old_record), + self.find_caller(old_record), + ) record.time = self.convert_time(old_record.created) return record @@ -164,12 +183,11 @@ class LoggingHandler(logbook.Handler): warn('This goes to logging') """ - def __init__(self, logger=None, level=logbook.NOTSET, filter=None, - bubble=False): + def __init__(self, logger=None, level=logbook.NOTSET, filter=None, bubble=False): logbook.Handler.__init__(self, level, filter, bubble) if logger is None: logger = logging.getLogger() - elif isinstance(logger, string_types): + elif isinstance(logger, str): logger = logging.getLogger(logger) self.logger = logger @@ -193,28 +211,24 @@ def convert_level(self, level): def convert_time(self, dt): """Converts a datetime object into a timestamp.""" - year, month, day, hour, minute, second = dt.utctimetuple()[:6] - days = date(year, month, 1).toordinal() - _epoch_ord + day - 1 - hours = days * 24 + hour - minutes = hours * 60 + minute - seconds = minutes * 60 + second - return seconds + if dt.tzinfo is None: + # Logbook uses naive datetimes to represent UTC (utcnow) + return dt.replace(tzinfo=timezone.utc).timestamp() + return dt.timestamp() def convert_record(self, old_record): """Converts a record from logbook to logging.""" - if sys.version_info >= (2, 5): - # make sure 2to3 does not screw this up - optional_kwargs = {'func': getattr(old_record, 'func_name')} - else: - optional_kwargs = {} - record = logging.LogRecord(old_record.channel, - 
self.convert_level(old_record.level), - old_record.filename, - old_record.lineno, - old_record.message, - (), old_record.exc_info, - **optional_kwargs) - for key, value in iteritems(old_record.extra): + record = logging.LogRecord( + old_record.channel, + self.convert_level(old_record.level), + old_record.filename, + old_record.lineno, + old_record.message, + (), + old_record.exc_info, + func=old_record.func_name, + ) + for key, value in old_record.extra.items(): record.__dict__.setdefault(key, value) record.created = self.convert_time(old_record.time) return record @@ -235,7 +249,7 @@ def redirect_warnings(): redirected_warnings().__enter__() -class redirected_warnings(object): +class redirected_warnings: """A context manager that copies and restores the warnings filter upon exiting the context, and logs warnings using the logbook system. @@ -257,15 +271,12 @@ def __init__(self): self._entered = False def message_to_unicode(self, message): - try: - return u(str(message)) - except UnicodeError: - return str(message).decode('utf-8', 'replace') + return str(message) def make_record(self, message, exception, filename, lineno): category = exception.__name__ - if exception.__module__ not in ('exceptions', 'builtins'): - category = exception.__module__ + '.' + category + if exception.__module__ not in ("exceptions", "builtins"): + category = exception.__module__ + "." + category rv = logbook.LogRecord(category, logbook.WARNING, message) # we don't know the caller, but we get that information from the # warning system. Just attach them. @@ -281,11 +292,11 @@ def start(self): warnings.filters = self._filters[:] self._showwarning = warnings.showwarning - def showwarning(message, category, filename, lineno, - file=None, line=None): + def showwarning(message, category, filename, lineno, file=None, line=None): message = self.message_to_unicode(message) record = self.make_record(message, category, filename, lineno) logbook.dispatch_record(record) + warnings.showwarning = showwarning def end(self, etype=None, evalue=None, tb=None): diff --git a/logbook/concurrency.py b/src/logbook/concurrency.py similarity index 64% rename from logbook/concurrency.py rename to src/logbook/concurrency.py index cd0fcb69..e429fb7b 100644 --- a/logbook/concurrency.py +++ b/src/logbook/concurrency.py @@ -1,3 +1,7 @@ +from contextvars import ContextVar +from itertools import count +from threading import current_thread + has_gevent = True use_gevent = False try: @@ -14,6 +18,7 @@ def _disable_gevent(): # for testing def is_gevent_enabled(): global use_gevent return use_gevent + except ImportError: has_gevent = False @@ -27,26 +32,23 @@ def is_gevent_enabled(): return False +def thread_get_name(): + return current_thread().name + + if has_gevent: from gevent.monkey import get_original as _get_original - ThreadLock = _get_original('threading', 'Lock') - ThreadRLock = _get_original('threading', 'RLock') - try: - thread_get_ident = _get_original('threading', 'get_ident') - except AttributeError: - # In 2.7, this is called _get_ident - thread_get_ident = _get_original('threading', '_get_ident') - thread_local = _get_original('threading', 'local') - from gevent.thread import get_ident as greenlet_get_ident + ThreadLock = _get_original("threading", "Lock") + ThreadRLock = _get_original("threading", "RLock") + thread_get_ident = _get_original("threading", "get_ident") + thread_local = _get_original("threading", "local") + from gevent.local import local as greenlet_local from gevent.lock import BoundedSemaphore - from gevent.threading 
import __threading__ - - def thread_get_name(): - return __threading__.currentThread().getName() + from gevent.thread import get_ident as greenlet_get_ident - class GreenletRLock(object): + class GreenletRLock: def __init__(self): self._thread_local = thread_local() self._owner = None @@ -55,8 +57,11 @@ def __init__(self): def __repr__(self): owner = self._owner - return "<%s owner=%r count=%d>" % (self.__class__.__name__, owner, - self._count) + return "<%s owner=%r count=%d>" % ( + self.__class__.__name__, + owner, + self._count, + ) def acquire(self, blocking=1): tid = thread_get_ident() @@ -119,7 +124,7 @@ def __exit__(self, t, v, tb): self.release() def _get_greenlet_lock(self): - if not hasattr(self._thread_local, 'greenlet_lock'): + if not hasattr(self._thread_local, "greenlet_lock"): greenlet_lock = self._thread_local.greenlet_lock = BoundedSemaphore(1) else: greenlet_lock = self._thread_local.greenlet_lock @@ -127,24 +132,18 @@ def _get_greenlet_lock(self): def _is_owned(self): return self._owner == (thread_get_ident(), greenlet_get_ident()) -else: - from threading import ( - Lock as ThreadLock, RLock as ThreadRLock, currentThread) - try: - from thread import ( - get_ident as thread_get_ident, _local as thread_local) - except ImportError: - from _thread import ( - get_ident as thread_get_ident, _local as thread_local) - def thread_get_name(): - return currentThread().getName() +else: + from threading import Lock as ThreadLock + from threading import RLock as ThreadRLock + from threading import get_ident as thread_get_ident + from threading import local as thread_local greenlet_get_ident = thread_get_ident greenlet_local = thread_local - class GreenletRLock(object): + class GreenletRLock: def acquire(self): pass @@ -166,51 +165,22 @@ def new_fine_grained_lock(): return ThreadRLock() -has_contextvars = True -try: - import contextvars -except ImportError: - has_contextvars = False - -if has_contextvars: - from contextvars import ContextVar - from itertools import count - - context_ident_counter = count() - context_ident = ContextVar('context_ident') - - def context_get_ident(): - try: - return context_ident.get() - except LookupError: - ident = 'context-%s' % next(context_ident_counter) - context_ident.set(ident) - return ident - - def is_context_enabled(): - try: - context_ident.get() - return True - except LookupError: - return False +context_ident_counter = count() +context_ident = ContextVar("context_ident") -else: - class ContextVar(object): - def __init__(self, name): - self.name = name - self.local = thread_local() - def set(self, value): - self.local = value - - def get(self, default=None): - if self.local is None: - return default - - return default +def context_get_ident(): + try: + return context_ident.get() + except LookupError: + ident = "context-%s" % next(context_ident_counter) + context_ident.set(ident) + return ident - def context_get_ident(): - return 1 - def is_context_enabled(): +def is_context_enabled(): + try: + context_ident.get() + return True + except LookupError: return False diff --git a/logbook/handlers.py b/src/logbook/handlers.py similarity index 77% rename from logbook/handlers.py rename to src/logbook/handlers.py index 9c28a83c..d39662bc 100644 --- a/logbook/handlers.py +++ b/src/logbook/handlers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.handlers ~~~~~~~~~~~~~~~~ @@ -8,40 +7,49 @@ :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. :license: BSD, see LICENSE for more details. 
""" -import io -import os -import re -import sys -import stat import errno -import socket import gzip import math -try: - from hashlib import sha1 -except ImportError: - from sha import new as sha1 +import os +import re +import socket +import stat +import sys import traceback -import collections -from datetime import datetime, timedelta from collections import deque +from collections.abc import Iterable, Mapping +from datetime import datetime, timedelta +from hashlib import sha1 from textwrap import dedent from logbook.base import ( - CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, TRACE, NOTSET, level_name_property, - _missing, lookup_level, Flags, ContextObject, ContextStackManager, - _datetime_factory) -from logbook.helpers import ( - rename, b, _is_text_stream, is_unicode, PY2, zip, xrange, string_types, collections_abc, - integer_types, reraise, u, with_metaclass) + CRITICAL, + DEBUG, + ERROR, + INFO, + NOTICE, + NOTSET, + TRACE, + WARNING, + ContextObject, + ContextStackManager, + Flags, + _datetime_factory, + _missing, + level_name_property, + lookup_level, +) from logbook.concurrency import new_fine_grained_lock +from logbook.helpers import rename -DEFAULT_FORMAT_STRING = u( - '[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] ' - '{record.level_name}: {record.channel}: {record.message}') +DEFAULT_FORMAT_STRING = ( + "[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] " + "{record.level_name}: {record.channel}: {record.message}" +) -SYSLOG_FORMAT_STRING = u('{record.channel}: {record.message}') -NTLOG_FORMAT_STRING = dedent(u(''' +SYSLOG_FORMAT_STRING = "{record.channel}: {record.message}" +NTLOG_FORMAT_STRING = dedent( + """ Message Level: {record.level_name} Location: {record.filename}:{record.lineno} Module: {record.module} @@ -51,10 +59,12 @@ Event provided Message: {record.message} - ''')).lstrip() + """ +).lstrip() -TEST_FORMAT_STRING = u('[{record.level_name}] {record.channel}: {record.message}') -MAIL_FORMAT_STRING = dedent(u(''' +TEST_FORMAT_STRING = "[{record.level_name}] {record.channel}: {record.message}" +MAIL_FORMAT_STRING = dedent( + """ Subject: {handler.subject} Message type: {record.level_name} @@ -66,15 +76,18 @@ Message: {record.message} - ''')).lstrip() + """ +).lstrip() -MAIL_RELATED_FORMAT_STRING = dedent(u(''' +MAIL_RELATED_FORMAT_STRING = dedent( + """ Message type: {record.level_name} Location: {record.filename}:{record.lineno} Module: {record.module} Function: {record.func_name} {record.message} - ''')).lstrip() + """ +).lstrip() SYSLOG_PORT = 514 @@ -86,7 +99,7 @@ def create_syshandler(application_name, level=NOTSET): this creates a :class:`SyslogHandler`, on Windows sytems it will create a :class:`NTEventLogHandler`. """ - if os.name == 'nt': + if os.name == "nt": return NTEventLogHandler(application_name, level=level) return SyslogHandler(application_name, level=level) @@ -101,8 +114,13 @@ class _HandlerType(type): def __new__(cls, name, bases, d): # aha, that thing has a custom close method. We will need a magic # __del__ for it to be called on cleanup. 
- if (bases != (ContextObject,) and 'close' in d and '__del__' not in d - and not any(hasattr(x, '__del__') for x in bases)): + if ( + bases != (ContextObject,) + and "close" in d + and "__del__" not in d + and not any(hasattr(x, "__del__") for x in bases) + ): + def _magic_del(self): try: self.close() @@ -110,11 +128,12 @@ def _magic_del(self): # del is also invoked when init fails, so we better just # ignore any exception that might be raised here pass - d['__del__'] = _magic_del + + d["__del__"] = _magic_del return type.__new__(cls, name, bases, d) -class Handler(with_metaclass(_HandlerType), ContextObject): +class Handler(ContextObject, metaclass=_HandlerType): """Handler instances dispatch logging events to specific destinations. The base handler class. Acts as a placeholder which defines the Handler @@ -157,6 +176,7 @@ class Handler(with_metaclass(_HandlerType), ContextObject): If gevent is enabled, the handler is aliased to `greenletbound`. """ + stack_manager = ContextStackManager() #: a flag for this handler that can be set to `True` for handlers that @@ -290,14 +310,17 @@ def handle_error(self, record, exc_info): Check :class:`Flags` for more information. """ try: - behaviour = Flags.get_flag('errors', 'print') - if behaviour == 'raise': - reraise(exc_info[0], exc_info[1], exc_info[2]) - elif behaviour == 'print': + behaviour = Flags.get_flag("errors", "print") + if behaviour == "raise": + raise exc_info[1] + elif behaviour == "print": traceback.print_exception(*(exc_info + (None, sys.stderr))) - sys.stderr.write('Logged from file %s, line %s\n' % ( - record.filename, record.lineno)) - except IOError: + sys.stderr.write( + "Logged from file {}, line {}\n".format( + record.filename, record.lineno + ) + ) + except OSError: pass @@ -312,11 +335,11 @@ class NullHandler(Handler): NullHandlers swallow all logs sent to them, and do not bubble them onwards. """ + blackhole = True def __init__(self, level=NOTSET, filter=None): - super(NullHandler, self).__init__(level=level, filter=filter, - bubble=False) + super().__init__(level=level, filter=filter, bubble=False) class WrapperHandler(Handler): @@ -331,7 +354,7 @@ class WrapperHandler(Handler): #: a set of direct attributes that are not forwarded to the inner #: handler. This has to be extended as necessary. - _direct_attrs = frozenset(['handler']) + _direct_attrs = frozenset(["handler"]) def __init__(self, handler): self.handler = handler @@ -345,7 +368,7 @@ def __setattr__(self, name, value): setattr(self.handler, name, value) -class StringFormatter(object): +class StringFormatter: """Many handlers format the log entries to text format. This is done by a callable that is passed a log record and returns an unicode string. 
The default formatter for this is implemented as a class so @@ -372,12 +395,12 @@ def format_record(self, record, handler): except UnicodeEncodeError: # self._formatter is a str, but some of the record items # are unicode - fmt = self._formatter.decode('ascii', 'replace') + fmt = self._formatter.decode("ascii", "replace") return fmt.format(record=record, handler=handler) except UnicodeDecodeError: # self._formatter is unicode, but some of the record items # are non-ascii str - fmt = self._formatter.encode('ascii', 'replace') + fmt = self._formatter.encode("ascii", "replace") return fmt.format(record=record, handler=handler) def format_exception(self, record): @@ -387,11 +410,11 @@ def __call__(self, record, handler): line = self.format_record(record, handler) exc = self.format_exception(record) if exc: - line += u('\n') + exc + line += "\n" + exc return line -class StringFormatterHandlerMixin(object): +class StringFormatterHandlerMixin: """A mixin for handlers that provides a default integration for the :class:`~logbook.StringFormatter` class. This is used for all handlers by default that log text to a destination. @@ -426,16 +449,16 @@ def _set_format_string(self, value): del _get_format_string, _set_format_string -class HashingHandlerMixin(object): +class HashingHandlerMixin: """Mixin class for handlers that are hashing records.""" def hash_record_raw(self, record): """Returns a hashlib object with the hash of the record.""" hash = sha1() - hash.update(('%d\x00' % record.level).encode('ascii')) - hash.update((record.channel or u('')).encode('utf-8') + b('\x00')) - hash.update(record.filename.encode('utf-8') + b('\x00')) - hash.update(b(str(record.lineno))) + hash.update(("%d\x00" % record.level).encode("ascii")) + hash.update((record.channel or "").encode("utf-8") + b"\x00") + hash.update(record.filename.encode("utf-8") + b"\x00") + hash.update(str(record.lineno).encode("utf-8")) return hash def hash_record(self, record): @@ -447,8 +470,6 @@ def hash_record(self, record): """ return self.hash_record_raw(record).hexdigest() -_NUMBER_TYPES = integer_types + (float,) - class LimitingHandlerMixin(HashingHandlerMixin): """Mixin class for handlers that want to limit emitting records. @@ -469,7 +490,7 @@ def __init__(self, record_limit, record_delta): self._record_limits = {} if record_delta is None: record_delta = timedelta(seconds=60) - elif isinstance(record_delta, _NUMBER_TYPES): + elif isinstance(record_delta, (int, float)): record_delta = timedelta(seconds=record_delta) self.record_delta = record_delta @@ -496,11 +517,12 @@ def check_delivery(self, record): first_count = last_count old_count = suppression_count - if (not suppression_count and - len(self._record_limits) >= self.max_record_cache): + if ( + not suppression_count + and len(self._record_limits) >= self.max_record_cache + ): cache_items = sorted(self._record_limits.items()) - del cache_items[:int(self._record_limits) - * self.record_cache_prune] + del cache_items[: int(len(cache_items) * self.record_cache_prune)] self._record_limits = dict(cache_items) self._record_limits[hash] = (first_count, old_count + 1) @@ -529,8 +551,15 @@ class StreamHandler(Handler, StringFormatterHandlerMixin): passed that was opened in binary mode.
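
Backing up to `HashingHandlerMixin` above: the record hash is a NUL-separated SHA-1 over level, channel, filename, and line number, which is what lets `LimitingHandlerMixin` count repeats per call site. A standalone sketch of the same layout:

    from hashlib import sha1

    def hash_fields(level, channel, filename, lineno):
        # Mirrors hash_record_raw above: NUL-separated fields, hex digest.
        h = sha1()
        h.update(("%d\x00" % level).encode("ascii"))
        h.update((channel or "").encode("utf-8") + b"\x00")
        h.update(filename.encode("utf-8") + b"\x00")
        h.update(str(lineno).encode("utf-8"))
        return h.hexdigest()

    # Records emitted from the same source line hash identically.
    assert hash_fields(4, "app", "views.py", 42) == hash_fields(4, "app", "views.py", 42)
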
""" - def __init__(self, stream, level=NOTSET, format_string=None, - encoding=None, filter=None, bubble=False): + def __init__( + self, + stream, + level=NOTSET, + format_string=None, + encoding=None, + filter=None, + bubble=False, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) self.encoding = encoding @@ -559,20 +588,12 @@ def close(self): def flush(self): """Flushes the inner stream.""" - if self.stream is not None and hasattr(self.stream, 'flush'): + if self.stream is not None and hasattr(self.stream, "flush"): self.stream.flush() def encode(self, msg): """Encodes the message to the stream encoding.""" - stream = self.stream - rv = msg + '\n' - if ((PY2 and is_unicode(rv)) or - not (PY2 or is_unicode(rv) or _is_text_stream(stream))): - enc = self.encoding - if enc is None: - enc = getattr(stream, 'encoding', None) or 'utf-8' - rv = rv.encode(enc, 'replace') - return rv + return msg + "\n" def write(self, item): """Writes a bytestring to the stream.""" @@ -602,13 +623,23 @@ class FileHandler(StreamHandler): :class:`~logbook.FingersCrossedHandler` or something similar. """ - def __init__(self, filename, mode='a', encoding=None, level=NOTSET, - format_string=None, delay=False, filter=None, bubble=False): + def __init__( + self, + filename, + mode="a", + encoding=None, + level=NOTSET, + format_string=None, + delay=False, + filter=None, + bubble=False, + ): if encoding is None: - encoding = 'utf-8' - StreamHandler.__init__(self, None, level, format_string, - encoding, filter, bubble) - self._filename = filename + encoding = "utf-8" + StreamHandler.__init__( + self, None, level, format_string, encoding, filter, bubble + ) + self._filename = os.fspath(filename) self._mode = mode if delay: self.stream = None @@ -618,7 +649,7 @@ def __init__(self, filename, mode='a', encoding=None, level=NOTSET, def _open(self, mode=None): if mode is None: mode = self._mode - self.stream = io.open(self._filename, mode, encoding=self.encoding) + self.stream = open(self._filename, mode, encoding=self.encoding) def write(self, item): self.ensure_stream_is_open() @@ -649,17 +680,35 @@ def ensure_stream_is_open(self): class GZIPCompressionHandler(FileHandler): - def __init__(self, filename, encoding=None, level=NOTSET, - format_string=None, delay=False, filter=None, bubble=False, compression_quality=9): - + def __init__( + self, + filename, + encoding=None, + level=NOTSET, + format_string=None, + delay=False, + filter=None, + bubble=False, + compression_quality=9, + ): self._compression_quality = compression_quality - super(GZIPCompressionHandler, self).__init__(filename, mode='wb', encoding=encoding, level=level, - format_string=format_string, delay=delay, filter=filter, bubble=bubble) + super().__init__( + filename, + mode="wb", + encoding=encoding, + level=level, + format_string=format_string, + delay=delay, + filter=filter, + bubble=bubble, + ) def _open(self, mode=None): if mode is None: mode = self._mode - self.stream = gzip.open(self._filename, mode, compresslevel=self._compression_quality) + self.stream = gzip.open( + self._filename, mode, compresslevel=self._compression_quality + ) def write(self, item): if isinstance(item, str): @@ -674,24 +723,44 @@ def should_flush(self): class BrotliCompressionHandler(FileHandler): - def __init__(self, filename, encoding=None, level=NOTSET, - format_string=None, delay=False, filter=None, bubble=False, - compression_window_size=4*1024**2, compression_quality=11): - super(BrotliCompressionHandler, 
self).__init__(filename, mode='wb', encoding=encoding, level=level, - format_string=format_string, delay=delay, filter=filter, bubble=bubble) + def __init__( + self, + filename, + encoding=None, + level=NOTSET, + format_string=None, + delay=False, + filter=None, + bubble=False, + compression_window_size=4 * 1024**2, + compression_quality=11, + ): + super().__init__( + filename, + mode="wb", + encoding=encoding, + level=level, + format_string=format_string, + delay=delay, + filter=filter, + bubble=bubble, + ) try: from brotli import Compressor except ImportError: - raise RuntimeError('The brotli library is required for ' - 'the BrotliCompressionHandler.') + raise RuntimeError( + "The brotli library is required for the BrotliCompressionHandler." + ) max_window_size = int(math.log(compression_window_size, 2)) - self._compressor = Compressor(quality=compression_quality, lgwin=max_window_size) + self._compressor = Compressor( + quality=compression_quality, lgwin=max_window_size + ) def _open(self, mode=None): if mode is None: mode = self._mode - self.stream = io.open(self._filename, mode) + self.stream = open(self._filename, mode) def write(self, item): if isinstance(item, str): @@ -700,7 +769,7 @@ def write(self, item): if ret: self.ensure_stream_is_open() self.stream.write(ret) - super(BrotliCompressionHandler, self).flush() + super().flush() def should_flush(self): return False @@ -711,14 +780,14 @@ def flush(self): if ret: self.ensure_stream_is_open() self.stream.write(ret) - super(BrotliCompressionHandler, self).flush() + super().flush() def close(self): if self._compressor is not None: self.ensure_stream_is_open() self.stream.write(self._compressor.finish()) self._compressor = None - super(BrotliCompressionHandler, self).close() + super().close() class MonitoringFileHandler(FileHandler): @@ -730,13 +799,22 @@ class MonitoringFileHandler(FileHandler): work on a windows system. """ - def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET, - format_string=None, delay=False, filter=None, bubble=False): - FileHandler.__init__(self, filename, mode, encoding, level, - format_string, delay, filter, bubble) - if os.name == 'nt': - raise RuntimeError('MonitoringFileHandler ' - 'does not support Windows') + def __init__( + self, + filename, + mode="a", + encoding="utf-8", + level=NOTSET, + format_string=None, + delay=False, + filter=None, + bubble=False, + ): + FileHandler.__init__( + self, filename, mode, encoding, level, format_string, delay, filter, bubble + ) + if os.name == "nt": + raise RuntimeError("MonitoringFileHandler does not support Windows") self._query_fd() def _query_fd(self): @@ -780,10 +858,10 @@ class StderrHandler(StreamHandler): point to the old one. """ - def __init__(self, level=NOTSET, format_string=None, filter=None, - bubble=False): - StreamHandler.__init__(self, _missing, level, format_string, - None, filter, bubble) + def __init__(self, level=NOTSET, format_string=None, filter=None, bubble=False): + StreamHandler.__init__( + self, _missing, level, format_string, None, filter, bubble + ) @property def stream(self): @@ -804,15 +882,25 @@ class RotatingFileHandler(FileHandler): asking on rollover. 
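
A brief usage sketch for the compression handlers above (file name and settings are illustrative; `BrotliCompressionHandler` additionally requires the third-party `brotli` package):

    from logbook.handlers import GZIPCompressionHandler

    handler = GZIPCompressionHandler("app.log.gz", compression_quality=9)
    with handler.applicationbound():
        ...  # records formatted here are written gzip-compressed
    handler.close()
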
""" - def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET, - format_string=None, delay=False, max_size=1024 * 1024, - backup_count=5, filter=None, bubble=False): - FileHandler.__init__(self, filename, mode, encoding, level, - format_string, delay, filter, bubble) + def __init__( + self, + filename, + mode="a", + encoding="utf-8", + level=NOTSET, + format_string=None, + delay=False, + max_size=1024 * 1024, + backup_count=5, + filter=None, + bubble=False, + ): + FileHandler.__init__( + self, filename, mode, encoding, level, format_string, delay, filter, bubble + ) self.max_size = max_size self.backup_count = backup_count - assert backup_count > 0, ('at least one backup file has to be ' - 'specified') + assert backup_count > 0, "at least one backup file has to be specified" def should_rollover(self, record, bytes): self.stream.seek(0, 2) @@ -820,17 +908,17 @@ def should_rollover(self, record, bytes): def perform_rollover(self): self.stream.close() - for x in xrange(self.backup_count - 1, 0, -1): - src = '%s.%d' % (self._filename, x) - dst = '%s.%d' % (self._filename, x + 1) + for x in range(self.backup_count - 1, 0, -1): + src = "%s.%d" % (self._filename, x) + dst = "%s.%d" % (self._filename, x + 1) try: rename(src, dst) except OSError: e = sys.exc_info()[1] if e.errno != errno.ENOENT: raise - rename(self._filename, self._filename + '.1') - self._open('w') + rename(self._filename, self._filename + ".1") + self._open("w") def emit(self, record): msg = self.format(record) @@ -885,11 +973,20 @@ class TimedRotatingFileHandler(FileHandler): until it is rolled over """ - def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET, - format_string=None, date_format='%Y-%m-%d', - backup_count=0, filter=None, bubble=False, - timed_filename_for_current=True, - rollover_format='{basename}-{timestamp}{ext}'): + def __init__( + self, + filename, + mode="a", + encoding="utf-8", + level=NOTSET, + format_string=None, + date_format="%Y-%m-%d", + backup_count=0, + filter=None, + bubble=False, + timed_filename_for_current=True, + rollover_format="{basename}-{timestamp}{ext}", + ): self.date_format = date_format self.backup_count = backup_count @@ -904,13 +1001,12 @@ def __init__(self, filename, mode='a', encoding='utf-8', level=NOTSET, filename = self.generate_timed_filename(self._timestamp) elif os.path.exists(filename): self._timestamp = self._get_timestamp( - datetime.fromtimestamp( - os.stat(filename).st_mtime - ) + datetime.fromtimestamp(os.stat(filename).st_mtime) ) - FileHandler.__init__(self, filename, mode, encoding, level, - format_string, True, filter, bubble) + FileHandler.__init__( + self, filename, mode, encoding, level, format_string, True, filter, bubble + ) def _get_timestamp(self, datetime): """ @@ -924,9 +1020,8 @@ def generate_timed_filename(self, timestamp): to the handler at init time. 
""" timed_filename = self.rollover_format.format( - basename=self.basename, - timestamp=timestamp, - ext=self.ext) + basename=self.basename, timestamp=timestamp, ext=self.ext + ) return timed_filename def files_to_delete(self): @@ -935,18 +1030,20 @@ def files_to_delete(self): """ directory = os.path.dirname(self._filename) files = [] - rollover_regex = re.compile(self.rollover_format.format( - basename=re.escape(self.basename), - timestamp='.+', - ext=re.escape(self.ext), - )) + rollover_regex = re.compile( + self.rollover_format.format( + basename=re.escape(self.basename), + timestamp=".+", + ext=re.escape(self.ext), + ) + ) for filename in os.listdir(directory): filename = os.path.join(directory, filename) if rollover_regex.match(filename): files.append((os.path.getmtime(filename), filename)) files.sort() if self.backup_count > 1: - return files[:-self.backup_count + 1] + return files[: -self.backup_count + 1] else: return files[:] @@ -954,10 +1051,7 @@ def perform_rollover(self, new_timestamp): if self.stream is not None: self.stream.close() - if ( - not self.timed_filename_for_current - and os.path.exists(self._filename) - ): + if not self.timed_filename_for_current and os.path.exists(self._filename): filename = self.generate_timed_filename(self._timestamp) os.rename(self._filename, filename) @@ -969,7 +1063,7 @@ def perform_rollover(self, new_timestamp): self._filename = self.generate_timed_filename(new_timestamp) self._timestamp = new_timestamp - self._open('w') + self._open("w") def emit(self, record): msg = self.format(record) @@ -996,10 +1090,17 @@ def my_test(): assert logger.has_warning('A warning') ... """ + default_format_string = TEST_FORMAT_STRING - def __init__(self, level=NOTSET, format_string=None, filter=None, - bubble=False, force_heavy_init=False): + def __init__( + self, + level=NOTSET, + format_string=None, + filter=None, + bubble=False, + force_heavy_init=False, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) #: captures the :class:`LogRecord`\s as instances @@ -1025,9 +1126,9 @@ def emit(self, record): @property def formatted_records(self): """Captures the formatted log records as unicode strings.""" - if (len(self._formatted_record_cache) != len(self.records) or - any(r1 != r2 for r1, r2 in - zip(self.records, self._formatted_record_cache))): + if len(self._formatted_record_cache) != len(self.records) or any( + r1 != r2 for r1, r2 in zip(self.records, self._formatted_record_cache) + ): self._formatted_records = [self.format(r) for r in self.records] self._formatted_record_cache = list(self.records) return self._formatted_records @@ -1072,7 +1173,7 @@ def has_critical(self, *args, **kwargs): See :ref:`probe-log-records` for more information. """ - kwargs['level'] = CRITICAL + kwargs["level"] = CRITICAL return self._test_for(*args, **kwargs) def has_error(self, *args, **kwargs): @@ -1080,7 +1181,7 @@ def has_error(self, *args, **kwargs): See :ref:`probe-log-records` for more information. """ - kwargs['level'] = ERROR + kwargs["level"] = ERROR return self._test_for(*args, **kwargs) def has_warning(self, *args, **kwargs): @@ -1088,7 +1189,7 @@ def has_warning(self, *args, **kwargs): See :ref:`probe-log-records` for more information. """ - kwargs['level'] = WARNING + kwargs["level"] = WARNING return self._test_for(*args, **kwargs) def has_notice(self, *args, **kwargs): @@ -1096,7 +1197,7 @@ def has_notice(self, *args, **kwargs): See :ref:`probe-log-records` for more information. 
""" - kwargs['level'] = NOTICE + kwargs["level"] = NOTICE return self._test_for(*args, **kwargs) def has_info(self, *args, **kwargs): @@ -1104,7 +1205,7 @@ def has_info(self, *args, **kwargs): See :ref:`probe-log-records` for more information. """ - kwargs['level'] = INFO + kwargs["level"] = INFO return self._test_for(*args, **kwargs) def has_debug(self, *args, **kwargs): @@ -1112,7 +1213,7 @@ def has_debug(self, *args, **kwargs): See :ref:`probe-log-records` for more information. """ - kwargs['level'] = DEBUG + kwargs["level"] = DEBUG return self._test_for(*args, **kwargs) def has_trace(self, *args, **kwargs): @@ -1120,7 +1221,7 @@ def has_trace(self, *args, **kwargs): See :ref:`probe-log-records` for more information. """ - kwargs['level'] = TRACE + kwargs["level"] = TRACE return self._test_for(*args, **kwargs) def _test_for(self, message=None, channel=None, level=None): @@ -1131,6 +1232,7 @@ def _match(needle, haystack): if needle == haystack: return True return False + for record in self.records: if level is not None and record.level != level: continue @@ -1142,8 +1244,7 @@ def _match(needle, haystack): return False -class MailHandler(Handler, StringFormatterHandlerMixin, - LimitingHandlerMixin): +class MailHandler(Handler, StringFormatterHandlerMixin, LimitingHandlerMixin): """A handler that sends error mails. The format string used by this handler are the contents of the mail plus the headers. This is handy if you want to use a custom subject or ``X-`` header:: @@ -1206,9 +1307,10 @@ class MailHandler(Handler, StringFormatterHandlerMixin, .. versionchanged:: 1.0 `secure` can now be a dictionary or boolean in addition to to a tuple. """ + default_format_string = MAIL_FORMAT_STRING default_related_format_string = MAIL_RELATED_FORMAT_STRING - default_subject = u('Server Error in Application') + default_subject = "Server Error in Application" #: the maximum number of record hashes in the cache for the limiting #: feature. Afterwards, record_cache_prune percent of the oldest @@ -1218,11 +1320,23 @@ class MailHandler(Handler, StringFormatterHandlerMixin, #: the number of items to prune on a cache overflow in percent. record_cache_prune = 0.333 - def __init__(self, from_addr, recipients, subject=None, - server_addr=None, credentials=None, secure=None, - record_limit=None, record_delta=None, level=NOTSET, - format_string=None, related_format_string=None, - filter=None, bubble=False, starttls=True): + def __init__( + self, + from_addr, + recipients, + subject=None, + server_addr=None, + credentials=None, + secure=None, + record_limit=None, + record_delta=None, + level=NOTSET, + format_string=None, + related_format_string=None, + filter=None, + bubble=False, + starttls=True, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) LimitingHandlerMixin.__init__(self, record_limit, record_delta) @@ -1248,8 +1362,10 @@ def _set_related_format_string(self, value): self.related_formatter = None else: self.related_formatter = self.formatter_class(value) - related_format_string = property(_get_related_format_string, - _set_related_format_string) + + related_format_string = property( + _get_related_format_string, _set_related_format_string + ) del _get_related_format_string, _set_related_format_string def get_recipients(self, record): @@ -1263,37 +1379,35 @@ def message_from_record(self, record, suppressed): (:class:`email.message.Message`). `suppressed` is the number of mails not sent if the `record_limit` feature is active. 
""" - from email.message import Message from email.header import Header + from email.message import Message + msg = Message() - msg.set_charset('utf-8') + msg.set_charset("utf-8") lineiter = iter(self.format(record).splitlines()) for line in lineiter: if not line: break - h, v = line.split(':', 1) + h, v = line.split(":", 1) # We could probably just encode everything. For the moment encode # only what really needed to avoid breaking a couple of tests. try: - v.encode('ascii') + v.encode("ascii") except UnicodeEncodeError: - msg[h.strip()] = Header(v.strip(), 'utf-8') + msg[h.strip()] = Header(v.strip(), "utf-8") else: msg[h.strip()] = v.strip() - msg.replace_header('Content-Transfer-Encoding', '8bit') + msg.replace_header("Content-Transfer-Encoding", "8bit") - body = '\r\n'.join(lineiter) + body = "\r\n".join(lineiter) if suppressed: - body += ('\r\n\r\nThis message occurred additional %d ' - 'time(s) and was suppressed' % suppressed) - - # inconsistency in Python 2.5 - # other versions correctly return msg.get_payload() as str - if sys.version_info < (2, 6) and isinstance(body, unicode): - body = body.encode('utf-8') + body += ( + "\r\n\r\nThis message occurred additional %d " + "time(s) and was suppressed" % suppressed + ) - msg.set_payload(body, 'UTF-8') + msg.set_payload(body, "UTF-8") return msg def format_related_record(self, record): @@ -1308,33 +1422,38 @@ def generate_mail(self, record, suppressed=0): that were not send if the `record_limit` feature is active. """ from email.utils import formatdate + msg = self.message_from_record(record, suppressed) - msg['From'] = self.from_addr - msg['Date'] = formatdate() + msg["From"] = self.from_addr + msg["Date"] = formatdate() return msg def collapse_mails(self, mail, related, reason): - """When escaling or grouped mails are """ + """When escaling or grouped mails are""" if not related: return mail - if reason == 'group': - title = 'Other log records in the same group' + if reason == "group": + title = "Other log records in the same group" else: - title = 'Log records that led up to this one' - mail.set_payload('%s\r\n\r\n\r\n%s:\r\n\r\n%s' % ( - mail.get_payload(), - title, - '\r\n\r\n'.join(body.rstrip() for body in related) - ), 'UTF-8') + title = "Log records that led up to this one" + mail.set_payload( + "{}\r\n\r\n\r\n{}:\r\n\r\n{}".format( + mail.get_payload(), + title, + "\r\n\r\n".join(body.rstrip() for body in related), + ), + "UTF-8", + ) return mail def get_connection(self): """Returns an SMTP connection. By default it reconnects for each sent mail. """ - from smtplib import SMTP, SMTP_SSL, SMTP_PORT, SMTP_SSL_PORT + from smtplib import SMTP, SMTP_PORT, SMTP_SSL, SMTP_SSL_PORT + if self.server_addr is None: - host = '127.0.0.1' + host = "127.0.0.1" port = self.secure and SMTP_SSL_PORT or SMTP_PORT else: try: @@ -1355,10 +1474,10 @@ def get_connection(self): # - tuple to be unpacked to variables keyfile and certfile. # - secure=() equivalent to secure=True for backwards compatibility. # - secure=False equivalent to secure=None to disable. 
- if isinstance(self.secure, collections_abc.Mapping): - keyfile = self.secure.get('keyfile', None) - certfile = self.secure.get('certfile', None) - elif isinstance(self.secure, collections_abc.Iterable): + if isinstance(self.secure, Mapping): + keyfile = self.secure.get("keyfile", None) + certfile = self.secure.get("certfile", None) + elif isinstance(self.secure, Iterable): # Allow empty tuple for backwards compatibility if len(self.secure) == 0: keyfile = certfile = None @@ -1381,7 +1500,7 @@ def get_connection(self): con.ehlo() # Allow credentials to be a tuple or dict. - if isinstance(self.credentials, collections_abc.Mapping): + if isinstance(self.credentials, Mapping): credentials_args = () credentials_kwargs = self.credentials else: @@ -1415,17 +1534,18 @@ def emit(self, record): suppressed, allow_delivery = self.check_delivery(record) if not allow_delivery: return - self.deliver(self.generate_mail(record, suppressed), - self.get_recipients(record)) + self.deliver( + self.generate_mail(record, suppressed), self.get_recipients(record) + ) def emit_batch(self, records, reason): - if reason not in ('escalation', 'group'): + if reason not in ("escalation", "group"): raise RuntimeError("reason must be either 'escalation' or 'group'") records = list(records) if not records: return - trigger = records.pop(reason == 'escalation' and -1 or 0) + trigger = records.pop(reason == "escalation" and -1 or 0) suppressed = 0 if self.record_limit is not None: suppressed, allow_delivery = self.check_delivery(trigger) @@ -1433,11 +1553,12 @@ def emit_batch(self, records, reason): return trigger_mail = self.generate_mail(trigger, suppressed) - related = [self.format_related_record(record) - for record in records] + related = [self.format_related_record(record) for record in records] - self.deliver(self.collapse_mails(trigger_mail, related, reason), - self.get_recipients(trigger)) + self.deliver( + self.collapse_mails(trigger_mail, related, reason), + self.get_recipients(trigger), + ) class GMailHandler(MailHandler): @@ -1453,123 +1574,138 @@ class GMailHandler(MailHandler): """ def __init__(self, account_id, password, recipients, **kw): - super(GMailHandler, self).__init__( - account_id, recipients, secure=True, + super().__init__( + account_id, + recipients, + secure=True, server_addr=("smtp.gmail.com", 587), - credentials=(account_id, password), **kw) + credentials=(account_id, password), + **kw, + ) class SyslogHandler(Handler, StringFormatterHandlerMixin): """A handler class which sends formatted logging records to a syslog server. By default it will send to it via unix socket. 
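
A short usage sketch for GMailHandler above (account and recipients are placeholders; the password typically needs to be an app-specific one):

    from logbook.handlers import GMailHandler

    handler = GMailHandler(
        "me@gmail.com",          # account_id, reused as the sender address
        "app-password",          # placeholder credential
        ["oncall@example.com"],  # recipients
    )
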
""" + default_format_string = SYSLOG_FORMAT_STRING # priorities - LOG_EMERG = 0 # system is unusable - LOG_ALERT = 1 # action must be taken immediately - LOG_CRIT = 2 # critical conditions - LOG_ERR = 3 # error conditions - LOG_WARNING = 4 # warning conditions - LOG_NOTICE = 5 # normal but significant condition - LOG_INFO = 6 # informational - LOG_DEBUG = 7 # debug-level messages + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages # facility codes - LOG_KERN = 0 # kernel messages - LOG_USER = 1 # random user-level messages - LOG_MAIL = 2 # mail system - LOG_DAEMON = 3 # system daemons - LOG_AUTH = 4 # security/authorization messages - LOG_SYSLOG = 5 # messages generated internally by syslogd - LOG_LPR = 6 # line printer subsystem - LOG_NEWS = 7 # network news subsystem - LOG_UUCP = 8 # UUCP subsystem - LOG_CRON = 9 # clock daemon - LOG_AUTHPRIV = 10 # security/authorization messages (private) - LOG_FTP = 11 # FTP daemon + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + LOG_FTP = 11 # FTP daemon # other codes through 15 reserved for system use - LOG_LOCAL0 = 16 # reserved for local use - LOG_LOCAL1 = 17 # reserved for local use - LOG_LOCAL2 = 18 # reserved for local use - LOG_LOCAL3 = 19 # reserved for local use - LOG_LOCAL4 = 20 # reserved for local use - LOG_LOCAL5 = 21 # reserved for local use - LOG_LOCAL6 = 22 # reserved for local use - LOG_LOCAL7 = 23 # reserved for local use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use facility_names = { - 'auth': LOG_AUTH, - 'authpriv': LOG_AUTHPRIV, - 'cron': LOG_CRON, - 'daemon': LOG_DAEMON, - 'ftp': LOG_FTP, - 'kern': LOG_KERN, - 'lpr': LOG_LPR, - 'mail': LOG_MAIL, - 'news': LOG_NEWS, - 'syslog': LOG_SYSLOG, - 'user': LOG_USER, - 'uucp': LOG_UUCP, - 'local0': LOG_LOCAL0, - 'local1': LOG_LOCAL1, - 'local2': LOG_LOCAL2, - 'local3': LOG_LOCAL3, - 'local4': LOG_LOCAL4, - 'local5': LOG_LOCAL5, - 'local6': LOG_LOCAL6, - 'local7': LOG_LOCAL7, + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "ftp": LOG_FTP, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, } level_priority_map = { - DEBUG: LOG_DEBUG, - INFO: LOG_INFO, - NOTICE: LOG_NOTICE, - WARNING: LOG_WARNING, - ERROR: LOG_ERR, - CRITICAL: LOG_CRIT + DEBUG: LOG_DEBUG, + INFO: LOG_INFO, + NOTICE: LOG_NOTICE, + WARNING: 
LOG_WARNING, + ERROR: LOG_ERR, + CRITICAL: LOG_CRIT, } - def __init__(self, application_name=None, address=None, - facility='user', socktype=socket.SOCK_DGRAM, - level=NOTSET, format_string=None, filter=None, - bubble=False, record_delimiter=None): + def __init__( + self, + application_name=None, + address=None, + facility="user", + socktype=socket.SOCK_DGRAM, + level=NOTSET, + format_string=None, + filter=None, + bubble=False, + record_delimiter=None, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) self.application_name = application_name if address is None: - if sys.platform == 'darwin': - address = '/var/run/syslog' + if sys.platform == "darwin": + address = "/var/run/syslog" else: - address = '/dev/log' + address = "/dev/log" self.remote_address = self.address = address self.facility = facility self.socktype = socktype - if isinstance(address, string_types): + if isinstance(address, str): self._connect_unixsocket() self.enveloper = self.unix_envelope - default_delimiter = u'\x00' + default_delimiter = "\x00" else: self._connect_netsocket() self.enveloper = self.net_envelope - default_delimiter = u'\n' + default_delimiter = "\n" - self.record_delimiter = default_delimiter \ - if record_delimiter is None else record_delimiter + self.record_delimiter = ( + default_delimiter if record_delimiter is None else record_delimiter + ) self.connection_exception = getattr( - __builtins__, 'BrokenPipeError', socket.error) + __builtins__, "BrokenPipeError", socket.error + ) def _connect_unixsocket(self): self.unixsocket = True self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: self.socket.connect(self.address) - except socket.error: + except OSError: self.socket.close() self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.socket.connect(self.address) @@ -1583,20 +1719,19 @@ def _connect_netsocket(self): def encode_priority(self, record): facility = self.facility_names[self.facility] - priority = self.level_priority_map.get(record.level, - self.LOG_WARNING) + priority = self.level_priority_map.get(record.level, self.LOG_WARNING) return (facility << 3) | priority def wrap_segments(self, record, before): msg = self.format(record) segments = [segment for segment in msg.split(self.record_delimiter)] - return (before + segment + self.record_delimiter - for segment in segments) - + return (before + segment + self.record_delimiter for segment in segments) + def unix_envelope(self, record): - before = u'<{}>{}'.format( + before = "<{}>{}".format( self.encode_priority(record), - self.application_name + ':' if self.application_name else '') + self.application_name + ":" if self.application_name else "", + ) return self.wrap_segments(record, before) def net_envelope(self, record): @@ -1604,19 +1739,22 @@ def net_envelope(self, record): try: format_string = self.format_string application_name = self.application_name - if not application_name and record.channel and \ - '{record.channel}: ' in format_string: - self.format_string = format_string.replace( - '{record.channel}: ', '') + if ( + not application_name + and record.channel + and "{record.channel}: " in format_string + ): + self.format_string = format_string.replace("{record.channel}: ", "") self.application_name = record.channel # RFC 5424: version timestamp hostname app-name procid # msgid structured-data message - before = u'<{}>1 {}Z {} {} {} - - '.format( + before = "<{}>1 {}Z {} {} {} - - ".format( self.encode_priority(record), record.time.isoformat(), 
socket.gethostname(), - self.application_name if self.application_name else '-', - record.process) + self.application_name if self.application_name else "-", + record.process, + ) return self.wrap_segments(record, before) finally: self.format_string = format_string @@ -1624,13 +1762,13 @@ def net_envelope(self, record): def emit(self, record): for segment in self.enveloper(record): - self.send_to_socket(segment.encode('utf-8')) + self.send_to_socket(segment.encode("utf-8")) def send_to_socket(self, data): if self.unixsocket: try: self.socket.send(data) - except socket.error: + except OSError: self._connect_unixsocket() self.socket.send(data) elif self.socktype == socket.SOCK_DGRAM: @@ -1649,44 +1787,53 @@ def close(self): class NTEventLogHandler(Handler, StringFormatterHandlerMixin): """A handler that sends to the NT event log system.""" + dllname = None default_format_string = NTLOG_FORMAT_STRING - def __init__(self, application_name, log_type='Application', - level=NOTSET, format_string=None, filter=None, - bubble=False): + def __init__( + self, + application_name, + log_type="Application", + level=NOTSET, + format_string=None, + filter=None, + bubble=False, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) - if os.name != 'nt': - raise RuntimeError('NTLogEventLogHandler requires a Windows ' - 'operating system.') + if os.name != "nt": + raise RuntimeError( + "NTLogEventLogHandler requires a Windows operating system." + ) try: - import win32evtlogutil import win32evtlog + import win32evtlogutil except ImportError: - raise RuntimeError('The pywin32 library is required ' - 'for the NTEventLogHandler.') + raise RuntimeError( + "The pywin32 library is required for the NTEventLogHandler." + ) self.application_name = application_name self._welu = win32evtlogutil dllname = self.dllname if not dllname: - dllname = os.path.join(os.path.dirname(self._welu.__file__), - '../win32service.pyd') + dllname = os.path.join( + os.path.dirname(self._welu.__file__), "../win32service.pyd" + ) self.log_type = log_type - self._welu.AddSourceToRegistry(self.application_name, dllname, - log_type) + self._welu.AddSourceToRegistry(self.application_name, dllname, log_type) self._default_type = win32evtlog.EVENTLOG_INFORMATION_TYPE self._type_map = { - DEBUG: win32evtlog.EVENTLOG_INFORMATION_TYPE, - INFO: win32evtlog.EVENTLOG_INFORMATION_TYPE, - NOTICE: win32evtlog.EVENTLOG_INFORMATION_TYPE, - WARNING: win32evtlog.EVENTLOG_WARNING_TYPE, - ERROR: win32evtlog.EVENTLOG_ERROR_TYPE, - CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE + DEBUG: win32evtlog.EVENTLOG_INFORMATION_TYPE, + INFO: win32evtlog.EVENTLOG_INFORMATION_TYPE, + NOTICE: win32evtlog.EVENTLOG_INFORMATION_TYPE, + WARNING: win32evtlog.EVENTLOG_WARNING_TYPE, + ERROR: win32evtlog.EVENTLOG_ERROR_TYPE, + CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, } def unregister_logger(self): @@ -1694,8 +1841,7 @@ def unregister_logger(self): this, the log viewer will no longer be able to provide any information about the message. 
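
A worked example of `encode_priority` above: the syslog PRI value is `facility << 3 | priority`, so an ERROR record on the handler's default `user` facility comes out as `<11>`:

    LOG_USER = 1  # facility code for the default 'user' facility
    LOG_ERR = 3   # priority that level_priority_map assigns to ERROR

    pri = (LOG_USER << 3) | LOG_ERR
    assert pri == 11
    # unix_envelope prefixes each segment with '<11>app-name:', while
    # net_envelope starts its RFC 5424 header with '<11>1 ...'.
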
""" - self._welu.RemoveSourceFromRegistry(self.application_name, - self.log_type) + self._welu.RemoveSourceFromRegistry(self.application_name, self.log_type) def get_event_type(self, record): return self._type_map.get(record.level, self._default_type) @@ -1716,8 +1862,9 @@ def emit(self, record): id = self.get_message_id(record) cat = self.get_event_category(record) type = self.get_event_type(record) - self._welu.ReportEvent(self.application_name, id, cat, type, - [self.format(record)]) + self._welu.ReportEvent( + self.application_name, id, cat, type, [self.format(record)] + ) class FingersCrossedHandler(Handler): @@ -1767,7 +1914,7 @@ def application(environ, start_response): .. versionchanged:: 0.3 The default behaviour is to buffer up records and then invoke another - handler when a severity theshold was reached with the buffer emitting. + handler when a severity threshold was reached with the buffer emitting. This now enables this logger to be properly used with the :class:`~logbook.MailHandler`. You will now only get one mail for each buffered record. However once the threshold was reached you would @@ -1788,11 +1935,18 @@ def application(environ, start_response): #: ``'escalation'``. #: #: .. versionadded:: 0.3 - batch_emit_reason = 'escalation' - - def __init__(self, handler, action_level=ERROR, buffer_size=0, - pull_information=True, reset=False, filter=None, - bubble=False): + batch_emit_reason = "escalation" + + def __init__( + self, + handler, + action_level=ERROR, + buffer_size=0, + pull_information=True, + reset=False, + filter=None, + bubble=False, + ): Handler.__init__(self, NOTSET, filter, bubble) self.lock = new_fine_grained_lock() self._level = action_level @@ -1828,8 +1982,7 @@ def enqueue(self, record): self.buffered_records.append(record) if self._buffer_full: self.buffered_records.popleft() - elif (self.buffer_size and - len(self.buffered_records) >= self.buffer_size): + elif self.buffer_size and len(self.buffered_records) >= self.buffer_size: self._buffer_full = True return record.level >= self._level return False @@ -1837,7 +1990,7 @@ def enqueue(self, record): def rollover(self, record): if self._handler is None: self._handler = self._handler_factory(record, self) - self._handler.emit_batch(iter(self.buffered_records), 'escalation') + self._handler.emit_batch(iter(self.buffered_records), "escalation") self.buffered_records.clear() self._action_triggered = not self._reset @@ -1880,8 +2033,8 @@ class GroupHandler(WrapperHandler): .. versionadded:: 0.3 """ - _direct_attrs = frozenset(['handler', 'pull_information', - 'buffered_records']) + + _direct_attrs = frozenset(["handler", "pull_information", "buffered_records"]) def __init__(self, handler, pull_information=True): WrapperHandler.__init__(self, handler) @@ -1889,7 +2042,7 @@ def __init__(self, handler, pull_information=True): self.buffered_records = [] def rollover(self): - self.handler.emit_batch(self.buffered_records, 'group') + self.handler.emit_batch(self.buffered_records, "group") self.buffered_records = [] def pop_application(self): diff --git a/logbook/helpers.py b/src/logbook/helpers.py similarity index 57% rename from logbook/helpers.py rename to src/logbook/helpers.py index 4ea693f7..233f2ebc 100644 --- a/logbook/helpers.py +++ b/src/logbook/helpers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.helpers ~~~~~~~~~~~~~~~ @@ -8,99 +7,27 @@ :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. :license: BSD, see LICENSE for more details. 
""" +import errno import os +import random import re import sys -import errno import time -import random from datetime import datetime, timedelta -PY2 = sys.version_info[0] == 2 - -if PY2: - import __builtin__ as _builtins - import collections as collections_abc -else: - import builtins as _builtins - import collections.abc as collections_abc - -try: - import json -except ImportError: - import simplejson as json - -if PY2: - from cStringIO import StringIO - iteritems = dict.iteritems - from itertools import izip as zip - xrange = _builtins.xrange -else: - from io import StringIO - zip = _builtins.zip - xrange = range - iteritems = dict.items - -_IDENTITY = lambda obj: obj - -if PY2: - def u(s): - return unicode(s, "unicode_escape") -else: - u = _IDENTITY - -if PY2: - integer_types = (int, long) - string_types = (basestring,) -else: - integer_types = (int,) - string_types = (str,) - -if PY2: - import httplib as http_client -else: - from http import client as http_client - -if PY2: - # Yucky, but apparently that's the only way to do this - exec(""" -def reraise(tp, value, tb=None): - raise tp, value, tb -""", locals(), globals()) -else: - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - # this regexp also matches incompatible dates like 20070101 because # some libraries (like the python xmlrpclib modules) use this _iso8601_re = re.compile( # date - r'(\d{4})(?:-?(\d{2})(?:-?(\d{2}))?)?' + r"(\d{4})(?:-?(\d{2})(?:-?(\d{2}))?)?" # time - r'(?:T(\d{2}):(\d{2})(?::(\d{2}(?:\.\d+)?))?(Z|[+-]\d{2}:\d{2})?)?$' + r"(?:T(\d{2}):(\d{2})(?::(\d{2}(?:\.\d+)?))?(Z|[+-]\d{2}:\d{2})?)?$" ) _missing = object() -if PY2: - def b(x): - return x - - def _is_text_stream(x): - return True -else: - import io - - def b(x): - return x.encode('ascii') - - def _is_text_stream(stream): - return isinstance(stream, io.TextIOBase) can_rename_open_file = False -if os.name == 'nt': +if os.name == "nt": try: import ctypes @@ -109,18 +36,14 @@ def _is_text_stream(stream): _MoveFileEx = ctypes.windll.kernel32.MoveFileExW def _rename(src, dst): - if PY2: - if not isinstance(src, unicode): - src = unicode(src, sys.getfilesystemencoding()) - if not isinstance(dst, unicode): - dst = unicode(dst, sys.getfilesystemencoding()) if _rename_atomic(src, dst): return True retry = 0 rv = False while not rv and retry < 100: - rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING | - _MOVEFILE_WRITE_THROUGH) + rv = _MoveFileEx( + src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH + ) if not rv: time.sleep(0.001) retry += 1 @@ -134,16 +57,21 @@ def _rename(src, dst): can_rename_open_file = True def _rename_atomic(src, dst): - ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Logbook rename') + ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Logbook rename") if ta == -1: return False try: retry = 0 rv = False while not rv and retry < 100: - rv = _MoveFileTransacted(src, dst, None, None, - _MOVEFILE_REPLACE_EXISTING | - _MOVEFILE_WRITE_THROUGH, ta) + rv = _MoveFileTransacted( + src, + dst, + None, + None, + _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH, + ta, + ) if rv: rv = _CommitTransaction(ta) break @@ -153,7 +81,9 @@ def _rename_atomic(src, dst): return rv finally: _CloseHandle(ta) + except Exception: + def _rename(src, dst): return False @@ -171,29 +101,29 @@ def rename(src, dst): e = sys.exc_info()[1] if e.errno not in (errno.EEXIST, errno.EACCES): raise - old = "%s-%08x" % (dst, random.randint(0, 2 ** 31 - 1)) + old = f"{dst}-{random.randint(0, 
2**31 - 1):08x}" os.rename(dst, old) os.rename(src, dst) try: os.unlink(old) except Exception: pass + else: rename = os.rename can_rename_open_file = True -_JSON_SIMPLE_TYPES = (bool, float) + integer_types + string_types +_JSON_SIMPLE_TYPES = (bool, float, int, str) def to_safe_json(data): """Makes a data structure safe for JSON silently discarding invalid objects from nested structures. This also converts dates. """ + def _convert(obj): if obj is None: return None - elif PY2 and isinstance(obj, str): - return obj.decode('utf-8', 'replace') elif isinstance(obj, _JSON_SIMPLE_TYPES): return obj elif isinstance(obj, datetime): @@ -204,13 +134,14 @@ def _convert(obj): return tuple(_convert(x) for x in obj) elif isinstance(obj, dict): rv = {} - for key, value in iteritems(obj): - if not isinstance(key, string_types): + for key, value in obj.items(): + if not isinstance(key, str): key = str(key) - if not is_unicode(key): - key = u(key) + if not isinstance(key, str): + key = key rv[key] = _convert(value) return rv + return _convert(data) @@ -218,10 +149,10 @@ def format_iso8601(d=None): """Returns a date in iso8601 format.""" if d is None: d = datetime.utcnow() - rv = d.strftime('%Y-%m-%dT%H:%M:%S') + rv = d.strftime("%Y-%m-%dT%H:%M:%S") if d.microsecond: - rv += '.' + str(d.microsecond) - return rv + 'Z' + rv += "." + str(d.microsecond) + return rv + "Z" def parse_iso8601(value): @@ -230,7 +161,7 @@ def parse_iso8601(value): """ m = _iso8601_re.match(value) if m is None: - raise ValueError('not a valid iso8601 date value') + raise ValueError("not a valid iso8601 date value") groups = m.groups() args = [] @@ -240,19 +171,19 @@ def parse_iso8601(value): args.append(group) seconds = groups[-2] if seconds is not None: - if '.' in seconds: - sec, usec = seconds.split('.') + if "." in seconds: + sec, usec = seconds.split(".") args.append(int(sec)) - args.append(int(usec.ljust(6, '0'))) + args.append(int(usec.ljust(6, "0"))) else: args.append(int(seconds)) rv = datetime(*args) tz = groups[-1] - if tz and tz != 'Z': - args = [int(x) for x in tz[1:].split(':')] + if tz and tz != "Z": + args = [int(x) for x in tz[1:].split(":")] delta = timedelta(hours=args[0], minutes=args[1]) - if tz[0] == '+': + if tz[0] == "+": rv -= delta else: rv += delta @@ -262,11 +193,11 @@ def parse_iso8601(value): def get_application_name(): if not sys.argv or not sys.argv[0]: - return 'Python' + return "Python" return os.path.basename(sys.argv[0]).title() -class cached_property(object): +class cached_property: """A property that is lazily calculated and then cached.""" def __init__(self, func, name=None, doc=None): @@ -287,23 +218,3 @@ def __get__(self, obj, type=None): def get_iterator_next_method(it): return lambda: next(it) - - -# python 2 support functions and aliases -def is_unicode(x): - if PY2: - return isinstance(x, unicode) - return isinstance(x, str) - -if PY2: - exec("""def with_metaclass(meta): - class _WithMetaclassBase(object): - __metaclass__ = meta - return _WithMetaclassBase -""") -else: - exec("""def with_metaclass(meta): - class _WithMetaclassBase(object, metaclass=meta): - pass - return _WithMetaclassBase -""") diff --git a/logbook/more.py b/src/logbook/more.py similarity index 69% rename from logbook/more.py rename to src/logbook/more.py index 3809cc01..4bff87b6 100644 --- a/logbook/more.py +++ b/src/logbook/more.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.more ~~~~~~~~~~~~ @@ -8,57 +7,50 @@ :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. :license: BSD, see LICENSE for more details. 
""" -import re import os import platform - +import re from collections import defaultdict from functools import partial +from urllib.parse import parse_qsl, urlencode -from logbook.base import ( - RecordDispatcher, dispatch_record, NOTSET, ERROR, NOTICE) -from logbook.handlers import ( - Handler, StringFormatter, StringFormatterHandlerMixin, StderrHandler) from logbook._termcolors import colorize -from logbook.helpers import PY2, string_types, iteritems, u -from logbook.ticketing import TicketingHandler as DatabaseHandler +from logbook.base import ERROR, NOTICE, NOTSET, RecordDispatcher, dispatch_record +from logbook.handlers import ( + Handler, + StderrHandler, + StringFormatter, + StringFormatterHandlerMixin, +) from logbook.ticketing import BackendBase +from logbook.ticketing import TicketingHandler as DatabaseHandler try: import riemann_client.client import riemann_client.transport except ImportError: riemann_client = None - #from riemann_client.transport import TCPTransport, UDPTransport, BlankTransport - + # from riemann_client.transport import TCPTransport, UDPTransport, BlankTransport -if PY2: - from urllib import urlencode - from urlparse import parse_qsl -else: - from urllib.parse import parse_qsl, urlencode - -_ws_re = re.compile(r'(\s+)', re.UNICODE) -TWITTER_FORMAT_STRING = u( - '[{record.channel}] {record.level_name}: {record.message}') -TWITTER_ACCESS_TOKEN_URL = 'https://twitter.com/oauth/access_token' -NEW_TWEET_URL = 'https://api.twitter.com/1/statuses/update.json' +_ws_re = re.compile(r"(\s+)", re.UNICODE) +TWITTER_FORMAT_STRING = "[{record.channel}] {record.level_name}: {record.message}" +TWITTER_ACCESS_TOKEN_URL = "https://twitter.com/oauth/access_token" +NEW_TWEET_URL = "https://api.twitter.com/1/statuses/update.json" class CouchDBBackend(BackendBase): - """Implements a backend that writes into a CouchDB database. - """ + """Implements a backend that writes into a CouchDB database.""" + def setup_backend(self): from couchdb import Server - uri = self.options.pop('uri', u('')) + uri = self.options.pop("uri", "") couch = Server(uri) - db_name = self.options.pop('db') + db_name = self.options.pop("db") self.database = couch[db_name] def record_ticket(self, record, data, hash, app_id): - """Records a log record as ticket. - """ + """Records a log record as ticket.""" db = self.database ticket = record.to_dict() @@ -72,11 +64,11 @@ class TwitterFormatter(StringFormatter): """Works like the standard string formatter and is used by the :class:`TwitterHandler` unless changed. 
""" + max_length = 140 def format_exception(self, record): - return u('%s: %s') % (record.exception_shortname, - record.exception_message) + return f"{record.exception_shortname}: {record.exception_message}" def __call__(self, record, handler): formatted = StringFormatter.__call__(self, record, handler) @@ -86,10 +78,10 @@ def __call__(self, record, handler): length += len(piece) if length > self.max_length: if length - len(piece) < self.max_length: - rv.append(u('…')) + rv.append("…") break rv.append(piece) - return u('').join(rv) + return "".join(rv) class TaggingLogger(RecordDispatcher): @@ -115,18 +107,19 @@ class TaggingLogger(RecordDispatcher): def __init__(self, name=None, tags=None): RecordDispatcher.__init__(self, name) # create a method for each tag named - for tag in (tags or ()): + for tag in tags or (): setattr(self, tag, partial(self.log, tag)) def log(self, tags, msg, *args, **kwargs): - if isinstance(tags, string_types): + if isinstance(tags, str): tags = [tags] - exc_info = kwargs.pop('exc_info', None) - extra = kwargs.pop('extra', {}) - extra['tags'] = list(tags) - frame_correction = kwargs.pop('frame_correction', 0) - return self.make_record_and_handle(NOTSET, msg, args, kwargs, - exc_info, extra, frame_correction) + exc_info = kwargs.pop("exc_info", None) + extra = kwargs.pop("extra", {}) + extra["tags"] = list(tags) + frame_correction = kwargs.pop("frame_correction", 0) + return self.make_record_and_handle( + NOTSET, msg, args, kwargs, exc_info, extra, frame_correction + ) class TaggingHandler(Handler): @@ -146,12 +139,13 @@ class TaggingHandler(Handler): def __init__(self, handlers, filter=None, bubble=False): Handler.__init__(self, NOTSET, filter, bubble) assert isinstance(handlers, dict) - self._handlers = dict( - (tag, isinstance(handler, Handler) and [handler] or handler) - for (tag, handler) in iteritems(handlers)) + self._handlers = { + tag: isinstance(handler, Handler) and [handler] or handler + for (tag, handler) in handlers.items() + } def emit(self, record): - for tag in record.extra.get('tags', ()): + for tag in record.extra.get("tags", ()): for handler in self._handlers.get(tag, ()): handler.handle(record) @@ -164,14 +158,23 @@ class TwitterHandler(Handler, StringFormatterHandlerMixin): If you don't want to register your own application and request xauth credentials, there are a couple of leaked consumer key and secret pairs from application explicitly whitelisted at Twitter - (`leaked secrets `_). + (`leaked secrets `_). """ + default_format_string = TWITTER_FORMAT_STRING formatter_class = TwitterFormatter - def __init__(self, consumer_key, consumer_secret, username, - password, level=NOTSET, format_string=None, filter=None, - bubble=False): + def __init__( + self, + consumer_key, + consumer_secret, + username, + password, + level=NOTSET, + format_string=None, + filter=None, + bubble=False, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) self.consumer_key = consumer_key @@ -182,35 +185,37 @@ def __init__(self, consumer_key, consumer_secret, username, try: import oauth2 except ImportError: - raise RuntimeError('The python-oauth2 library is required for ' - 'the TwitterHandler.') + raise RuntimeError( + "The python-oauth2 library is required for the TwitterHandler." 
+ ) self._oauth = oauth2 self._oauth_token = None self._oauth_token_secret = None - self._consumer = oauth2.Consumer(consumer_key, - consumer_secret) + self._consumer = oauth2.Consumer(consumer_key, consumer_secret) self._client = oauth2.Client(self._consumer) def get_oauth_token(self): """Returns the oauth access token.""" if self._oauth_token is None: resp, content = self._client.request( - TWITTER_ACCESS_TOKEN_URL + '?', 'POST', - body=urlencode({ - 'x_auth_username': self.username.encode('utf-8'), - 'x_auth_password': self.password.encode('utf-8'), - 'x_auth_mode': 'client_auth' - }), - headers={'Content-Type': 'application/x-www-form-urlencoded'} + TWITTER_ACCESS_TOKEN_URL + "?", + "POST", + body=urlencode( + { + "x_auth_username": self.username.encode("utf-8"), + "x_auth_password": self.password.encode("utf-8"), + "x_auth_mode": "client_auth", + } + ), + headers={"Content-Type": "application/x-www-form-urlencoded"}, ) - if resp['status'] != '200': - raise RuntimeError('unable to login to Twitter') + if resp["status"] != "200": + raise RuntimeError("unable to login to Twitter") data = dict(parse_qsl(content)) - self._oauth_token = data['oauth_token'] - self._oauth_token_secret = data['oauth_token_secret'] - return self._oauth.Token(self._oauth_token, - self._oauth_token_secret) + self._oauth_token = data["oauth_token"] + self._oauth_token_secret = data["oauth_token_secret"] + return self._oauth.Token(self._oauth_token, self._oauth_token_secret) def make_client(self): """Creates a new oauth client auth a new access token.""" @@ -220,10 +225,12 @@ def tweet(self, status): """Tweets a given status. Status must not exceed 140 chars.""" client = self.make_client() resp, content = client.request( - NEW_TWEET_URL, 'POST', - body=urlencode({'status': status.encode('utf-8')}), - headers={'Content-Type': 'application/x-www-form-urlencoded'}) - return resp['status'] == '200' + NEW_TWEET_URL, + "POST", + body=urlencode({"status": status.encode("utf-8")}), + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + return resp["status"] == "200" def emit(self, record): self.tweet(self.format(record)) @@ -236,9 +243,15 @@ class SlackHandler(Handler, StringFormatterHandlerMixin): slacker library has to be installed. """ - def __init__(self, api_token, channel, level=NOTSET, format_string=None, filter=None, - bubble=False): - + def __init__( + self, + api_token, + channel, + level=NOTSET, + format_string=None, + filter=None, + bubble=False, + ): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) self.api_token = api_token @@ -246,8 +259,7 @@ def __init__(self, api_token, channel, level=NOTSET, format_string=None, filter= try: from slacker import Slacker except ImportError: - raise RuntimeError('The slacker library is required for ' - 'the SlackHandler.') + raise RuntimeError("The slacker library is required for the SlackHandler.") self.channel = channel self.slack = Slacker(api_token) @@ -256,7 +268,7 @@ def emit(self, record): self.slack.chat.post_message(channel=self.channel, text=self.format(record)) -class JinjaFormatter(object): +class JinjaFormatter: """A formatter object that makes it easy to format using a Jinja 2 template instead of a format string. 
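
A hedged usage sketch for SlackHandler above (token and channel are placeholders; the third-party `slacker` package must be installed):

    from logbook import ERROR
    from logbook.more import SlackHandler

    handler = SlackHandler(
        api_token="xoxb-placeholder-token",
        channel="#alerts",
        level=ERROR,
    )
    with handler.applicationbound():
        ...  # ERROR and above are posted to #alerts
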
""" @@ -265,8 +277,7 @@ def __init__(self, template): try: from jinja2 import Template except ImportError: - raise RuntimeError('The jinja2 library is required for ' - 'the JinjaFormatter.') + raise RuntimeError("The jinja2 library is required for the JinjaFormatter.") self.template = Template(template) def __call__(self, record, handler): @@ -293,9 +304,15 @@ class ExternalApplicationHandler(Handler): .. versionadded:: 0.3 """ - def __init__(self, arguments, stdin_format=None, - encoding='utf-8', level=NOTSET, filter=None, - bubble=False): + def __init__( + self, + arguments, + stdin_format=None, + encoding="utf-8", + level=NOTSET, + filter=None, + bubble=False, + ): Handler.__init__(self, level, filter, bubble) self.encoding = encoding self._arguments = list(arguments) @@ -303,14 +320,13 @@ def __init__(self, arguments, stdin_format=None, stdin_format = stdin_format self._stdin_format = stdin_format import subprocess + self._subprocess = subprocess def emit(self, record): - args = [arg.format(record=record) - for arg in self._arguments] + args = [arg.format(record=record) for arg in self._arguments] if self._stdin_format is not None: - stdin_data = (self._stdin_format.format(record=record) - .encode(self.encoding)) + stdin_data = self._stdin_format.format(record=record).encode(self.encoding) stdin = self._subprocess.PIPE else: stdin = None @@ -320,25 +336,24 @@ def emit(self, record): c.wait() -class ColorizingStreamHandlerMixin(object): +class ColorizingStreamHandlerMixin: """A mixin class that does colorizing. .. versionadded:: 0.3 .. versionchanged:: 1.0.0 Added Windows support if `colorama`_ is installed. - .. _`colorama`: https://pypi.org/pypi/colorama + .. _`colorama`: https://pypi.org/project/colorama """ + _use_color = None def force_color(self): - """Force colorizing the stream (`should_colorize` will return True) - """ + """Force colorizing the stream (`should_colorize` will return True)""" self._use_color = True def forbid_color(self): - """Forbid colorizing the stream (`should_colorize` will return False) - """ + """Forbid colorizing the stream (`should_colorize` will return False)""" self._use_color = False def should_colorize(self, record): @@ -347,26 +362,26 @@ def should_colorize(self, record): stream is a tty. If we are executing on Windows, colorama must be installed. """ - if os.name == 'nt': + if os.name == "nt": try: import colorama except ImportError: return False if self._use_color is not None: return self._use_color - isatty = getattr(self.stream, 'isatty', None) + isatty = getattr(self.stream, "isatty", None) return isatty and isatty() def get_color(self, record): """Returns the color for this record.""" if record.level >= ERROR: - return 'red' + return "red" elif record.level >= NOTICE: - return 'yellow' - return 'lightgray' + return "yellow" + return "lightgray" def format(self, record): - rv = super(ColorizingStreamHandlerMixin, self).format(record) + rv = super().format(record) if self.should_colorize(record): color = self.get_color(record) if color: @@ -383,8 +398,9 @@ class ColorizedStderrHandler(ColorizingStreamHandlerMixin, StderrHandler): .. versionchanged:: 1.0 Added Windows support if `colorama`_ is installed. - .. _`colorama`: https://pypi.org/pypi/colorama + .. _`colorama`: https://pypi.org/project/colorama """ + def __init__(self, *args, **kwargs): StderrHandler.__init__(self, *args, **kwargs) @@ -399,16 +415,20 @@ def __init__(self, *args, **kwargs): # backwards compat. 
Should go away in some future releases
-from logbook.handlers import (
-    FingersCrossedHandler as FingersCrossedHandlerBase)
+from logbook.handlers import FingersCrossedHandler as FingersCrossedHandlerBase


 class FingersCrossedHandler(FingersCrossedHandlerBase):
     def __init__(self, *args, **kwargs):
         FingersCrossedHandlerBase.__init__(self, *args, **kwargs)
         from warnings import warn
-        warn(PendingDeprecationWarning('fingers crossed handler changed '
-             'location. It\'s now a core component of Logbook.'))
+
+        warn(
+            PendingDeprecationWarning(
+                "fingers crossed handler changed "
+                "location. It's now a core component of Logbook."
+            )
+        )


 class ExceptionHandler(Handler, StringFormatterHandlerMixin):
@@ -425,8 +445,10 @@ class ApplicationWarning(Exception):

     .. versionadded:: 0.3
     """
-    def __init__(self, exc_type, level=NOTSET, format_string=None,
-                 filter=None, bubble=False):
+
+    def __init__(
+        self, exc_type, level=NOTSET, format_string=None, filter=None, bubble=False
+    ):
         Handler.__init__(self, level, filter, bubble)
         StringFormatterHandlerMixin.__init__(self, format_string)
         self.exc_type = exc_type
@@ -454,9 +476,10 @@ class DedupHandler(Handler):
     message repeated 2 times: foo
     message repeated 1 times: bar
     """
-    def __init__(self,
-                 format_string='message repeated {count} times: {message}',
-                 *args, **kwargs):
+
+    def __init__(
+        self, format_string="message repeated {count} times: {message}", *args, **kwargs
+    ):
         Handler.__init__(self, bubble=False, *args, **kwargs)
         self._format_string = format_string
         self.clear()
@@ -490,8 +513,8 @@ def handle(self, record):
     def flush(self):
         for record in self._unique_ordered_records:
             record.message = self._format_string.format(
-                message=record.message,
-                count=self._message_to_count[record.message])
+                message=record.message, count=self._message_to_count[record.message]
+            )
             # record.dispatcher is the logger who created the message,
             # it's sometimes suppressed (by logbook.info for example)
             if record.dispatcher is not None:
@@ -508,15 +531,17 @@ class RiemannHandler(Handler):

     A handler that sends logs as events to Riemann.
     """
-    def __init__(self,
-                 host,
-                 port,
-                 message_type="tcp",
-                 ttl=60,
-                 flush_threshold=10,
-                 bubble=False,
-                 filter=None,
-                 level=NOTSET):
+    def __init__(
+        self,
+        host,
+        port,
+        message_type="tcp",
+        ttl=60,
+        flush_threshold=10,
+        bubble=False,
+        filter=None,
+        level=NOTSET,
+    ):
         """
         :param host: riemann host
         :param port: riemann port
@@ -525,7 +550,9 @@ def __init__(self,
         :param flush_threshold: count of events after which we send to riemann
         """
         if riemann_client is None:
-            raise NotImplementedError("The Riemann handler requires the riemann_client package")  # pragma: no cover
+            raise NotImplementedError(
+                "The Riemann handler requires the riemann_client package"
+            )  # pragma: no cover
         Handler.__init__(self, level, filter, bubble)
         self.host = host
         self.port = port
@@ -539,33 +566,37 @@ def __init__(self,
         elif message_type == "test":
             self.transport = riemann_client.transport.BlankTransport
         else:
-            msg = ("Currently supported message types for RiemannHandler are: {0}. \
-                {1} is not supported."
-                   .format(",".join(["tcp", "udp", "test"]), message_type))
+            msg = "Currently supported message types for RiemannHandler are: {}. \
+                {} is not supported.".format(
+                ",".join(["tcp", "udp", "test"]), message_type
+            )
             raise RuntimeError(msg)

     def record_to_event(self, record):
         from time import time
+
         tags = ["log", record.level_name]
         msg = str(record.exc_info[1]) if record.exc_info else record.msg
         channel_name = str(record.channel) if record.channel else "unknown"
-        if any([record.level_name == keywords
-                for keywords in ["ERROR", "EXCEPTION"]]):
+        if any([record.level_name == keywords for keywords in ["ERROR", "EXCEPTION"]]):
             state = "error"
         else:
             state = "ok"
-        return {"metric_f": 1.0,
-                "tags": tags,
-                "description": msg,
-                "time": int(time()),
-                "ttl": self.ttl,
-                "host": platform.node(),
-                "service": "{0}.{1}".format(channel_name, os.getpid()),
-                "state": state
-                }
+        return {
+            "metric_f": 1.0,
+            "tags": tags,
+            "description": msg,
+            "time": int(time()),
+            "ttl": self.ttl,
+            "host": platform.node(),
+            "service": f"{channel_name}.{os.getpid()}",
+            "state": state,
+        }

     def _flush_events(self):
-        with riemann_client.client.QueuedClient(self.transport(self.host, self.port)) as cl:
+        with riemann_client.client.QueuedClient(
+            self.transport(self.host, self.port)
+        ) as cl:
             for event in self.queue:
                 cl.event(**event)
             cl.flush()
diff --git a/logbook/notifiers.py b/src/logbook/notifiers.py
similarity index 58%
rename from logbook/notifiers.py
rename to src/logbook/notifiers.py
index ce9468ab..4199adc4 100644
--- a/logbook/notifiers.py
+++ b/src/logbook/notifiers.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 """
     logbook.notifiers
     ~~~~~~~~~~~~~~~~~
@@ -8,28 +7,24 @@
     :copyright: (c) 2010 by Armin Ronacher, Christopher Grebs.
     :license: BSD, see LICENSE for more details.
 """
+import base64
 import os
 import sys
-import base64
+from http import client as http_client
 from time import time
+from urllib.parse import urlencode

-from logbook.base import NOTSET, ERROR, WARNING
+from logbook.base import ERROR, NOTSET, WARNING
 from logbook.handlers import Handler, LimitingHandlerMixin
-from logbook.helpers import get_application_name, PY2, http_client, u
-
-if PY2:
-    from urllib import urlencode
-else:
-    from urllib.parse import urlencode
+from logbook.helpers import get_application_name


-def create_notification_handler(application_name=None, level=NOTSET,
-                                icon=None):
+def create_notification_handler(application_name=None, level=NOTSET, icon=None):
     """Creates a handler that perfectly fits the current platform. On Linux
     systems this creates a :class:`LibNotifyHandler`, on OS X systems it
     will create a :class:`GrowlHandler`.
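
    A short sketch (the application name is illustrative)::

        handler = create_notification_handler("My Application")
        handler.push_application()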
""" - if sys.platform == 'darwin': + if sys.platform == "darwin": return GrowlHandler(application_name, level=level, icon=icon) return LibNotifyHandler(application_name, level=level, icon=icon) @@ -37,8 +32,15 @@ def create_notification_handler(application_name=None, level=NOTSET, class NotificationBaseHandler(Handler, LimitingHandlerMixin): """Baseclass for notification handlers.""" - def __init__(self, application_name=None, record_limit=None, - record_delta=None, level=NOTSET, filter=None, bubble=False): + def __init__( + self, + application_name=None, + record_limit=None, + record_delta=None, + level=NOTSET, + filter=None, + bubble=False, + ): Handler.__init__(self, level, filter, bubble) LimitingHandlerMixin.__init__(self, record_limit, record_delta) if application_name is None: @@ -47,7 +49,7 @@ def __init__(self, application_name=None, record_limit=None, def make_title(self, record): """Called to get the title from the record.""" - return u('%s: %s') % (record.channel, record.level_name.title()) + return f"{record.channel}: {record.level_name.title()}" def make_text(self, record): """Called to get the text of the record.""" @@ -59,29 +61,42 @@ class GrowlHandler(NotificationBaseHandler): py-Growl are installed. """ - def __init__(self, application_name=None, icon=None, host=None, - password=None, record_limit=None, record_delta=None, - level=NOTSET, filter=None, bubble=False): - NotificationBaseHandler.__init__(self, application_name, record_limit, - record_delta, level, filter, bubble) + def __init__( + self, + application_name=None, + icon=None, + host=None, + password=None, + record_limit=None, + record_delta=None, + level=NOTSET, + filter=None, + bubble=False, + ): + NotificationBaseHandler.__init__( + self, application_name, record_limit, record_delta, level, filter, bubble + ) # growl is using the deprecated md5 module, but we really don't need # to see that deprecation warning from warnings import filterwarnings - filterwarnings(module='Growl', category=DeprecationWarning, - action='ignore') + + filterwarnings(module="Growl", category=DeprecationWarning, action="ignore") try: import Growl + self._growl = Growl except ImportError: - raise RuntimeError('The growl module is not available. You have ' - 'to install either growl-py or py-Growl to ' - 'use the GrowlHandler.') + raise RuntimeError( + "The growl module is not available. You have " + "to install either growl-py or py-Growl to " + "use the GrowlHandler." 
+ ) if icon is not None: if not os.path.isfile(icon): - raise IOError('Filename to an icon expected.') + raise OSError("Filename to an icon expected.") icon = self._growl.Image.imageFromPath(icon) else: try: @@ -92,10 +107,17 @@ def __init__(self, application_name=None, icon=None, host=None, self._notifier = self._growl.GrowlNotifier( applicationName=self.application_name, applicationIcon=icon, - notifications=['Notset', 'Debug', 'Info', 'Notice', 'Warning', - 'Error', 'Critical'], + notifications=[ + "Notset", + "Debug", + "Info", + "Notice", + "Warning", + "Error", + "Critical", + ], hostname=host, - password=password + password=password, ) self._notifier.register() @@ -119,11 +141,13 @@ def get_priority(self, record): def emit(self, record): if not self.check_delivery(record)[1]: return - self._notifier.notify(record.level_name.title(), - self.make_title(record), - self.make_text(record), - sticky=self.is_sticky(record), - priority=self.get_priority(record)) + self._notifier.notify( + record.level_name.title(), + self.make_title(record), + self.make_text(record), + sticky=self.is_sticky(record), + priority=self.get_priority(record), + ) class LibNotifyHandler(NotificationBaseHandler): @@ -131,18 +155,29 @@ class LibNotifyHandler(NotificationBaseHandler): If `no_init` is set to `True` the initialization of libnotify is skipped. """ - def __init__(self, application_name=None, icon=None, no_init=False, - record_limit=None, record_delta=None, level=NOTSET, - filter=None, bubble=False): - NotificationBaseHandler.__init__(self, application_name, record_limit, - record_delta, level, filter, bubble) + def __init__( + self, + application_name=None, + icon=None, + no_init=False, + record_limit=None, + record_delta=None, + level=NOTSET, + filter=None, + bubble=False, + ): + NotificationBaseHandler.__init__( + self, application_name, record_limit, record_delta, level, filter, bubble + ) try: import pynotify + self._pynotify = pynotify except ImportError: - raise RuntimeError('The pynotify library is required for ' - 'the LibNotifyHandler.') + raise RuntimeError( + "The pynotify library is required for the LibNotifyHandler." + ) self.icon = icon if not no_init: @@ -154,7 +189,7 @@ def set_notifier_icon(self, notifier, icon): from gtk import gdk except ImportError: # TODO: raise a warning? - raise RuntimeError('The gtk.gdk module is required to set an icon.') + raise RuntimeError("The gtk.gdk module is required to set an icon.") if icon is not None: if not isinstance(icon, gdk.Pixbuf): @@ -183,8 +218,9 @@ def get_urgency(self, record): def emit(self, record): if not self.check_delivery(record)[1]: return - notifier = self._pynotify.Notification(self.make_title(record), - self.make_text(record)) + notifier = self._pynotify.Notification( + self.make_title(record), self.make_text(record) + ) notifier.set_urgency(self.get_urgency(record)) notifier.set_timeout(self.get_expires(record)) self.set_notifier_icon(notifier, self.icon) @@ -195,12 +231,22 @@ class BoxcarHandler(NotificationBaseHandler): """Sends notifications to boxcar.io. Can be forwarded to your iPhone or other compatible device. 
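
    A sketch with placeholder credentials::

        from logbook import ERROR
        from logbook.notifiers import BoxcarHandler

        handler = BoxcarHandler("user@example.com", "password", level=ERROR)
        handler.push_application()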
""" - api_url = 'https://boxcar.io/notifications/' - def __init__(self, email, password, record_limit=None, record_delta=None, - level=NOTSET, filter=None, bubble=False): - NotificationBaseHandler.__init__(self, None, record_limit, - record_delta, level, filter, bubble) + api_url = "https://boxcar.io/notifications/" + + def __init__( + self, + email, + password, + record_limit=None, + record_delta=None, + level=NOTSET, + filter=None, + bubble=False, + ): + NotificationBaseHandler.__init__( + self, None, record_limit, record_delta, level, filter, bubble + ) self.email = email self.password = password @@ -211,19 +257,25 @@ def get_screen_name(self, record): def emit(self, record): if not self.check_delivery(record)[1]: return - body = urlencode({ - 'notification[from_screen_name]': - self.get_screen_name(record).encode('utf-8'), - 'notification[message]': - self.make_text(record).encode('utf-8'), - 'notification[from_remote_service_id]': str(int(time() * 100)) - }) - con = http_client.HTTPSConnection('boxcar.io') - con.request('POST', '/notifications/', headers={ - 'Authorization': 'Basic ' + - base64.b64encode((u('%s:%s') % (self.email, self.password)) - .encode('utf-8')).strip(), - }, body=body) + body = urlencode( + { + "notification[from_screen_name]": self.get_screen_name(record).encode( + "utf-8" + ), + "notification[message]": self.make_text(record).encode("utf-8"), + "notification[from_remote_service_id]": str(int(time() * 100)), + } + ) + con = http_client.HTTPSConnection("boxcar.io") + con.request( + "POST", + "/notifications/", + headers={ + "Authorization": "Basic " + + base64.b64encode(f"{self.email}:{self.password}".encode()).strip(), + }, + body=body, + ) con.close() @@ -232,18 +284,28 @@ class NotifoHandler(NotificationBaseHandler): iPhone, or other compatible device. """ - def __init__(self, application_name=None, username=None, secret=None, - record_limit=None, record_delta=None, level=NOTSET, - filter=None, bubble=False, hide_level=False): + def __init__( + self, + application_name=None, + username=None, + secret=None, + record_limit=None, + record_delta=None, + level=NOTSET, + filter=None, + bubble=False, + hide_level=False, + ): try: import notifo except ImportError: raise RuntimeError( - 'The notifo module is not available. You have ' - 'to install notifo to use the NotifoHandler.' + "The notifo module is not available. You have " + "to install notifo to use the NotifoHandler." ) - NotificationBaseHandler.__init__(self, None, record_limit, - record_delta, level, filter, bubble) + NotificationBaseHandler.__init__( + self, None, record_limit, record_delta, level, filter, bubble + ) self._notifo = notifo self.application_name = application_name self.username = username @@ -251,15 +313,20 @@ def __init__(self, application_name=None, username=None, secret=None, self.hide_level = hide_level def emit(self, record): - if self.hide_level: _level_name = None else: _level_name = self.level_name - self._notifo.send_notification(self.username, self.secret, None, - record.message, self.application_name, - _level_name, None) + self._notifo.send_notification( + self.username, + self.secret, + None, + record.message, + self.application_name, + _level_name, + None, + ) class PushoverHandler(NotificationBaseHandler): @@ -268,13 +335,23 @@ class PushoverHandler(NotificationBaseHandler): or 1, it is set to 0 automatically. 
""" - def __init__(self, application_name=None, apikey=None, userkey=None, - device=None, priority=0, sound=None, record_limit=None, - record_delta=None, level=NOTSET, filter=None, bubble=False, - max_title_len=100, max_message_len=512): - - super(PushoverHandler, self).__init__(None, record_limit, record_delta, - level, filter, bubble) + def __init__( + self, + application_name=None, + apikey=None, + userkey=None, + device=None, + priority=0, + sound=None, + record_limit=None, + record_delta=None, + level=NOTSET, + filter=None, + bubble=False, + max_title_len=100, + max_message_len=512, + ): + super().__init__(None, record_limit, record_delta, level, filter, bubble) self.application_name = application_name self.apikey = apikey @@ -296,7 +373,7 @@ def __init__(self, application_name=None, apikey=None, userkey=None, def _crop(self, msg, max_len): if max_len is not None and max_len > 0 and len(msg) > max_len: - return "%s..." % (msg[:max_len-3],) + return f"{msg[: max_len - 3]}..." else: return msg @@ -304,20 +381,20 @@ def emit(self, record): message = self._crop(record.message, self.max_message_len) body_dict = { - 'token': self.apikey, - 'user': self.userkey, - 'message': message, - 'priority': self.priority + "token": self.apikey, + "user": self.userkey, + "message": message, + "priority": self.priority, } if self.title is not None: - body_dict['title'] = self.title + body_dict["title"] = self.title if self.device is not None: - body_dict['device'] = self.device + body_dict["device"] = self.device if self.sound is not None: - body_dict['sound'] = self.sound + body_dict["sound"] = self.sound body = urlencode(body_dict) - con = http_client.HTTPSConnection('api.pushover.net') - con.request('POST', '/1/messages.json', body=body) + con = http_client.HTTPSConnection("api.pushover.net") + con.request("POST", "/1/messages.json", body=body) con.close() diff --git a/logbook/queues.py b/src/logbook/queues.py similarity index 89% rename from logbook/queues.py rename to src/logbook/queues.py index 162615b2..fecb35a7 100644 --- a/logbook/queues.py +++ b/src/logbook/queues.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.queues ~~~~~~~~~~~~~~ @@ -9,17 +8,14 @@ :license: BSD, see LICENSE for more details. 
""" import json -import threading -from threading import Thread, Lock import platform +import threading +from queue import Empty, Full +from queue import Queue as ThreadQueue +from threading import Lock, Thread + from logbook.base import NOTSET, LogRecord, dispatch_record from logbook.handlers import Handler, WrapperHandler -from logbook.helpers import PY2, u - -if PY2: - from Queue import Empty, Full, Queue as ThreadQueue -else: - from queue import Empty, Full, Queue as ThreadQueue class RedisHandler(Handler): @@ -42,25 +38,36 @@ class RedisHandler(Handler): More info about the default buffer size: wp.me/p3tYJu-3b """ - def __init__(self, host='127.0.0.1', port=6379, key='redis', - extra_fields=None, flush_threshold=128, flush_time=1, - level=NOTSET, filter=None, password=False, bubble=True, - context=None, push_method='rpush'): + + def __init__( + self, + host="127.0.0.1", + port=6379, + key="redis", + extra_fields=None, + flush_threshold=128, + flush_time=1, + level=NOTSET, + filter=None, + password=False, + bubble=True, + context=None, + push_method="rpush", + ): Handler.__init__(self, level, filter, bubble) try: import redis from redis import ResponseError except ImportError: - raise RuntimeError('The redis library is required for ' - 'the RedisHandler') + raise RuntimeError("The redis library is required for the RedisHandler") - self.redis = redis.Redis(host=host, port=port, password=password, - decode_responses=True) + self.redis = redis.Redis( + host=host, port=port, password=password, decode_responses=True + ) try: self.redis.ping() except ResponseError: - raise ResponseError( - 'The password provided is apparently incorrect') + raise ResponseError("The password provided is apparently incorrect") self.key = key self.extra_fields = extra_fields or {} self.flush_threshold = flush_threshold @@ -70,16 +77,15 @@ def __init__(self, host='127.0.0.1', port=6379, key='redis', # Set up a thread that flushes the queue every specified seconds self._stop_event = threading.Event() - self._flushing_t = threading.Thread(target=self._flush_task, - args=(flush_time, - self._stop_event)) + self._flushing_t = threading.Thread( + target=self._flush_task, args=(flush_time, self._stop_event) + ) self._flushing_t.daemon = True self._flushing_t.start() def _flush_task(self, time, stop_event): - """Calls the method _flush_buffer every certain time. - """ - while not self._stop_event.isSet(): + """Calls the method _flush_buffer every certain time.""" + while not self._stop_event.is_set(): with self.lock: self._flush_buffer() self._stop_event.wait(time) @@ -111,10 +117,12 @@ def emit(self, record): Extra values are also appended to the message. """ with self.lock: - r = {"message": record.msg, - "host": platform.node(), - "level": record.level_name, - "time": record.time.isoformat()} + r = { + "message": record.msg, + "host": platform.node(), + "level": record.level_name, + "time": record.time.isoformat(), + } r.update(self.extra_fields) r.update(record.kwargs) self.queue.append(json.dumps(r)) @@ -149,25 +157,26 @@ class MessageQueueHandler(Handler): Several other backends are also supported. Refer to the `kombu`_ documentation - .. _kombu: http://kombu.readthedocs.org/en/latest/introduction.html + .. 
_kombu: https://docs.celeryq.dev/projects/kombu/en/latest/introduction.html """ - def __init__(self, uri=None, queue='logging', level=NOTSET, - filter=None, bubble=False): + def __init__( + self, uri=None, queue="logging", level=NOTSET, filter=None, bubble=False + ): Handler.__init__(self, level, filter, bubble) try: import kombu except ImportError: - raise RuntimeError('The kombu library is required for ' - 'the RabbitMQSubscriber.') + raise RuntimeError( + "The kombu library is required for the RabbitMQSubscriber." + ) if uri: connection = kombu.Connection(uri) self.queue = connection.SimpleQueue(queue) def export_record(self, record): - """Exports the record into a dictionary ready for JSON dumping. - """ + """Exports the record into a dictionary ready for JSON dumping.""" return record.to_dict(json_safe=True) def emit(self, record): @@ -198,14 +207,20 @@ class ZeroMQHandler(Handler): handler = ZeroMQHandler('tcp://127.0.0.1:5000') """ - def __init__(self, uri=None, level=NOTSET, filter=None, bubble=False, - context=None, multi=False): + def __init__( + self, + uri=None, + level=NOTSET, + filter=None, + bubble=False, + context=None, + multi=False, + ): Handler.__init__(self, level, filter, bubble) try: import zmq except ImportError: - raise RuntimeError('The pyzmq library is required for ' - 'the ZeroMQHandler.') + raise RuntimeError("The pyzmq library is required for the ZeroMQHandler.") #: the zero mq context self.context = context or zmq.Context() @@ -225,8 +240,7 @@ def export_record(self, record): return record.to_dict(json_safe=True) def emit(self, record): - self.socket.send(json.dumps( - self.export_record(record)).encode("utf-8")) + self.socket.send(json.dumps(self.export_record(record)).encode("utf-8")) def close(self, linger=-1): self.socket.close(linger) @@ -238,10 +252,11 @@ def __del__(self): # not reachable. # If messages are pending on the socket, we wait 100ms for them to be # sent then we discard them. - self.close(linger=100) + if hasattr(self, "socket"): + self.close(linger=100) -class ThreadController(object): +class ThreadController: """A helper class used by queue subscribers to control the background thread. This is usually created and started in one go by :meth:`~logbook.queues.ZeroMQSubscriber.dispatch_in_background` or @@ -258,7 +273,7 @@ def start(self): """Starts the task thread.""" self.running = True self._thread = Thread(target=self._target) - self._thread.setDaemon(True) + self._thread.daemon = True self._thread.start() def stop(self): @@ -279,7 +294,7 @@ def _target(self): self.setup.pop_thread() -class SubscriberBase(object): +class SubscriberBase: """Baseclass for all subscribers.""" def recv(self, timeout=None): @@ -348,11 +363,12 @@ class MessageQueueSubscriber(SubscriberBase): controller.stop() """ - def __init__(self, uri=None, queue='logging'): + + def __init__(self, uri=None, queue="logging"): try: import kombu except ImportError: - raise RuntimeError('The kombu library is required.') + raise RuntimeError("The kombu library is required.") if uri: connection = kombu.Connection(uri) @@ -428,8 +444,9 @@ def __init__(self, uri=None, context=None, multi=False): try: import zmq except ImportError: - raise RuntimeError('The pyzmq library is required for ' - 'the ZeroMQSubscriber.') + raise RuntimeError( + "The pyzmq library is required for the ZeroMQSubscriber." 
+ ) self._zmq = zmq #: the zero mq context @@ -445,7 +462,7 @@ def __init__(self, uri=None, context=None, multi=False): self.socket = self.context.socket(zmq.SUB) if uri is not None: self.socket.connect(uri) - self.socket.setsockopt_unicode(zmq.SUBSCRIBE, u('')) + self.socket.setsockopt_unicode(zmq.SUBSCRIBE, "") def __del__(self): try: @@ -473,8 +490,7 @@ def recv(self, timeout=None): if not self._zmq.select([self.socket], [], [], timeout)[0]: return rv = self.socket.recv(self._zmq.NOBLOCK) - if not PY2: - rv = rv.decode("utf-8") + rv = rv.decode("utf-8") return LogRecord.from_dict(json.loads(rv)) @@ -487,6 +503,7 @@ def _fix_261_mplog(): """ import logging import multiprocessing + logging.multiprocessing = multiprocessing @@ -553,6 +570,7 @@ class MultiProcessingSubscriber(SubscriberBase): def __init__(self, queue=None): if queue is None: from multiprocessing import Queue + queue = Queue(-1) self.queue = queue _fix_261_mplog() @@ -599,12 +617,13 @@ def recv(self, timeout=None): return LogRecord.from_dict(rv) -class TWHThreadController(object): +class TWHThreadController: """A very basic thread controller that pulls things in from a queue and sends it to a handler. Both queue and handler are taken from the passed :class:`ThreadedWrapperHandler`. """ - class Command(object): + + class Command: stop = object() emit = object() emit_batch = object() @@ -618,13 +637,13 @@ def start(self): """Starts the task thread.""" self.running = True self._thread = Thread(target=self._target) - self._thread.setDaemon(True) + self._thread.daemon = True self._thread.start() def stop(self): """Stops the task thread.""" if self.running: - self.wrapper_handler.queue.put_nowait((self.Command.stop, )) + self.wrapper_handler.queue.put_nowait((self.Command.stop,)) self._thread.join() self._thread = None @@ -636,7 +655,7 @@ def _target(self): self.running = False break elif command is self.Command.emit: - (record, ) = data + (record,) = data self.wrapper_handler.handler.emit(record) elif command is self.Command.emit_batch: record, reason = data @@ -659,7 +678,8 @@ class ThreadedWrapperHandler(WrapperHandler): >>> twh.handler.level_name 'WARNING' """ - _direct_attrs = frozenset(['handler', 'queue', 'controller']) + + _direct_attrs = frozenset(["handler", "queue", "controller"]) def __init__(self, handler, maxsize=0): WrapperHandler.__init__(self, handler) @@ -724,6 +744,7 @@ class SubscriberGroup(SubscriberBase): with target_handler: subscribers.dispatch_forever() """ + def __init__(self, subscribers=None, queue_limit=10): self.members = [] self.queue = ThreadQueue(queue_limit) diff --git a/logbook/ticketing.py b/src/logbook/ticketing.py similarity index 57% rename from logbook/ticketing.py rename to src/logbook/ticketing.py index 7321fa32..510e0db0 100644 --- a/logbook/ticketing.py +++ b/src/logbook/ticketing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ logbook.ticketing ~~~~~~~~~~~~~~~~~ @@ -9,21 +8,22 @@ :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. :license: BSD, see LICENSE for more details. 
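
    A typical setup (the SQLite URI and app id are illustrative)::

        from logbook.ticketing import TicketingHandler

        handler = TicketingHandler("sqlite:///tickets.db", app_id="myapp")
        handler.push_application()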
""" -from time import time import json -from logbook.base import NOTSET, level_name_property, LogRecord +from time import time + +from logbook.base import NOTSET, LogRecord, level_name_property from logbook.handlers import Handler, HashingHandlerMixin -from logbook.helpers import cached_property, b, PY2, u +from logbook.helpers import cached_property -class Ticket(object): +class Ticket: """Represents a ticket from the database.""" level_name = level_name_property() def __init__(self, db, row): self.db = db - self.__dict__.update(row) + self.__dict__.update(row._mapping) @cached_property def last_occurrence(self): @@ -32,7 +32,7 @@ def last_occurrence(self): if rv: return rv[0] - def get_occurrences(self, order_by='-time', limit=50, offset=0): + def get_occurrences(self, order_by="-time", limit=50, offset=0): """Returns the occurrences for this ticket.""" return self.db.get_occurrences(self.ticket_id, order_by, limit, offset) @@ -64,14 +64,14 @@ class Occurrence(LogRecord): """Represents an occurrence of a ticket.""" def __init__(self, db, row): - self.update_from_dict(json.loads(row['data'])) + self.update_from_dict(json.loads(row.data)) self.db = db - self.time = row['time'] - self.ticket_id = row['ticket_id'] - self.occurrence_id = row['occurrence_id'] + self.time = row.time + self.ticket_id = row.ticket_id + self.occurrence_id = row.occurrence_id -class BackendBase(object): +class BackendBase: """Provides an abstract interface to various databases.""" def __init__(self, **options): @@ -90,8 +90,7 @@ def count_tickets(self): """Returns the number of tickets.""" raise NotImplementedError() - def get_tickets(self, order_by='-last_occurrence_time', - limit=50, offset=0): + def get_tickets(self, order_by="-last_occurrence_time", limit=50, offset=0): """Selects tickets from the database.""" raise NotImplementedError() @@ -107,7 +106,7 @@ def get_ticket(self, ticket_id): """Return a single ticket with all occurrences.""" raise NotImplementedError() - def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0): + def get_occurrences(self, ticket, order_by="-time", limit=50, offset=0): """Selects occurrences from the database for a ticket.""" raise NotImplementedError() @@ -132,20 +131,20 @@ class SQLAlchemyBackend(BackendBase): """ def setup_backend(self): - from sqlalchemy import create_engine, MetaData - from sqlalchemy.orm import sessionmaker, scoped_session - engine_or_uri = self.options.pop('uri', None) - metadata = self.options.pop('metadata', None) - table_prefix = self.options.pop('table_prefix', 'logbook_') + from sqlalchemy import MetaData, create_engine + from sqlalchemy.orm import scoped_session, sessionmaker + + engine_or_uri = self.options.pop("uri", None) + metadata = self.options.pop("metadata", None) + table_prefix = self.options.pop("table_prefix", "logbook_") - if hasattr(engine_or_uri, 'execute'): + if hasattr(engine_or_uri, "execute"): self.engine = engine_or_uri else: # Pool recycle keeps connections from going stale, # which happens in MySQL Databases # Pool size is more custom for out stack - self.engine = create_engine(engine_or_uri, convert_unicode=True, - pool_recycle=360, pool_size=1000) + self.engine = create_engine(engine_or_uri, pool_recycle=360, pool_size=1000) # Create session factory using session maker session = sessionmaker() @@ -162,7 +161,7 @@ def setup_backend(self): self.table_prefix = table_prefix self.metadata = metadata self.create_tables() - if self.options.get('autocreate_tables', True): + if self.options.get("autocreate_tables", True): 
self.metadata.create_all(bind=self.engine) def create_tables(self): @@ -172,33 +171,36 @@ def create_tables(self): import sqlalchemy as db def table(name, *args, **kwargs): - return db.Table(self.table_prefix + name, self.metadata, - *args, **kwargs) - self.tickets = table('tickets', - db.Column('ticket_id', db.Integer, - primary_key=True), - db.Column('record_hash', db.String(40), - unique=True), - db.Column('level', db.Integer), - db.Column('channel', db.String(120)), - db.Column('location', db.String(512)), - db.Column('module', db.String(256)), - db.Column('last_occurrence_time', db.DateTime), - db.Column('occurrence_count', db.Integer), - db.Column('solved', db.Boolean), - db.Column('app_id', db.String(80))) - self.occurrences = table('occurrences', - db.Column('occurrence_id', - db.Integer, primary_key=True), - db.Column('ticket_id', db.Integer, - db.ForeignKey(self.table_prefix + - 'tickets.ticket_id')), - db.Column('time', db.DateTime), - db.Column('data', db.Text), - db.Column('app_id', db.String(80))) + return db.Table(self.table_prefix + name, self.metadata, *args, **kwargs) + + self.tickets = table( + "tickets", + db.Column("ticket_id", db.Integer, primary_key=True), + db.Column("record_hash", db.String(40), unique=True), + db.Column("level", db.Integer), + db.Column("channel", db.String(120)), + db.Column("location", db.String(512)), + db.Column("module", db.String(256)), + db.Column("last_occurrence_time", db.DateTime), + db.Column("occurrence_count", db.Integer), + db.Column("solved", db.Boolean), + db.Column("app_id", db.String(80)), + ) + self.occurrences = table( + "occurrences", + db.Column("occurrence_id", db.Integer, primary_key=True), + db.Column( + "ticket_id", + db.Integer, + db.ForeignKey(self.table_prefix + "tickets.ticket_id"), + ), + db.Column("time", db.DateTime), + db.Column("data", db.Text), + db.Column("app_id", db.String(80)), + ) def _order(self, q, table, order_by): - if order_by[0] == '-': + if order_by[0] == "-": return q.order_by(table.c[order_by[1:]].desc()) return q.order_by(table.c[order_by]) @@ -207,33 +209,41 @@ def record_ticket(self, record, data, hash, app_id): # Can use the session instead engine.connection and transaction s = self.session try: - q = self.tickets.select(self.tickets.c.record_hash == hash) - row = s.execute(q).fetchone() + q = self.tickets.select().where(self.tickets.c.record_hash == hash) + row = s.execute(q).one_or_none() if row is None: - row = s.execute(self.tickets.insert().values( - record_hash=hash, - level=record.level, - channel=record.channel or u(''), - location=u('%s:%d') % (record.filename, record.lineno), - module=record.module or u(''), - occurrence_count=0, - solved=False, - app_id=app_id - )) + row = s.execute( + self.tickets.insert().values( + record_hash=hash, + level=record.level, + channel=record.channel or "", + location="%s:%d" % (record.filename, record.lineno), + module=record.module or "", + occurrence_count=0, + solved=False, + app_id=app_id, + ) + ) ticket_id = row.inserted_primary_key[0] else: - ticket_id = row['ticket_id'] - s.execute(self.occurrences.insert() - .values(ticket_id=ticket_id, - time=record.time, - app_id=app_id, - data=json.dumps(data))) + ticket_id = row.ticket_id + s.execute( + self.occurrences.insert().values( + ticket_id=ticket_id, + time=record.time, + app_id=app_id, + data=json.dumps(data), + ) + ) s.execute( self.tickets.update() .where(self.tickets.c.ticket_id == ticket_id) - .values(occurrence_count=self.tickets.c.occurrence_count + 1, - last_occurrence_time=record.time, - 
solved=False)) + .values( + occurrence_count=self.tickets.c.occurrence_count + 1, + last_occurrence_time=record.time, + solved=False, + ) + ) s.commit() except Exception: s.rollback() @@ -243,43 +253,70 @@ def record_ticket(self, record, data, hash, app_id): def count_tickets(self): """Returns the number of tickets.""" - return self.engine.execute(self.tickets.count()).fetchone()[0] + from sqlalchemy import func, select - def get_tickets(self, order_by='-last_occurrence_time', limit=50, - offset=0): + with self.engine.begin() as conn: + return conn.scalar(select(func.count()).select_from(self.tickets)) + + def get_tickets(self, order_by="-last_occurrence_time", limit=50, offset=0): """Selects tickets from the database.""" - return [Ticket(self, row) for row in self.engine.execute( - self._order(self.tickets.select(), self.tickets, order_by) - .limit(limit).offset(offset)).fetchall()] + with self.engine.begin() as conn: + return [ + Ticket(self, row) + for row in conn.execute( + self._order(self.tickets.select(), self.tickets, order_by) + .limit(limit) + .offset(offset) + ) + ] def solve_ticket(self, ticket_id): """Marks a ticket as solved.""" - self.engine.execute(self.tickets.update() - .where(self.tickets.c.ticket_id == ticket_id) - .values(solved=True)) + with self.engine.begin() as conn: + conn.execute( + self.tickets.update() + .where(self.tickets.c.ticket_id == ticket_id) + .values(solved=True) + ) def delete_ticket(self, ticket_id): """Deletes a ticket from the database.""" - self.engine.execute(self.occurrences.delete() - .where(self.occurrences.c.ticket_id == ticket_id)) - self.engine.execute(self.tickets.delete() - .where(self.tickets.c.ticket_id == ticket_id)) + with self.engine.begin() as conn: + conn.execute( + self.occurrences.delete().where( + self.occurrences.c.ticket_id == ticket_id + ) + ) + conn.execute( + self.tickets.delete().where(self.tickets.c.ticket_id == ticket_id) + ) def get_ticket(self, ticket_id): """Return a single ticket with all occurrences.""" - row = self.engine.execute(self.tickets.select().where( - self.tickets.c.ticket_id == ticket_id)).fetchone() + with self.engine.begin() as conn: + row = conn.execute( + self.tickets.select().where(self.tickets.c.ticket_id == ticket_id) + ).one_or_none() if row is not None: return Ticket(self, row) - def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0): + def get_occurrences(self, ticket, order_by="-time", limit=50, offset=0): """Selects occurrences from the database for a ticket.""" - return [Occurrence(self, row) for row in - self.engine.execute(self._order( - self.occurrences.select() - .where(self.occurrences.c.ticket_id == ticket), - self.occurrences, order_by) - .limit(limit).offset(offset)).fetchall()] + with self.engine.begin() as conn: + return [ + Occurrence(self, row) + for row in conn.execute( + self._order( + self.occurrences.select().where( + self.occurrences.c.ticket_id == ticket + ), + self.occurrences, + order_by, + ) + .limit(limit) + .offset(offset) + ) + ] class MongoDBBackend(BackendBase): @@ -292,11 +329,11 @@ def ticket_id(self): class _FixedOccurrenceClass(Occurrence): def __init__(self, db, row): - self.update_from_dict(json.loads(row['data'])) + self.update_from_dict(json.loads(row["data"])) self.db = db - self.time = row['time'] - self.ticket_id = row['ticket_id'] - self.occurrence_id = row['_id'] + self.time = row["time"] + self.ticket_id = row["ticket_id"] + self.occurrence_id = row["_id"] # TODO: Update connection setup once PYTHON-160 is solved. 
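    # For reference, a rough sketch of the equivalent setup on modern pymongo
    # (3.x and later), where Connection was replaced by MongoClient; this is
    # an assumption about the eventual fix, not part of this change:
    #
    #     from pymongo import MongoClient
    #     client = MongoClient(uri)
    #     database = client.get_default_database()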
     def setup_backend(self):
@@ -304,24 +341,24 @@ def setup_backend(self):
         from pymongo.connection import Connection

         try:
-            from pymongo.uri_parser import parse_uri
+            from pymongo.uri_parser import parse_uri
         except ImportError:
-            from pymongo.connection import _parse_uri as parse_uri
+            from pymongo.connection import _parse_uri as parse_uri
         from pymongo.errors import AutoReconnect

         _connection = None
-        uri = self.options.pop('uri', u(''))
+        uri = self.options.pop("uri", "")
         _connection_attempts = 0

         parsed_uri = parse_uri(uri, Connection.PORT)

         if type(parsed_uri) is tuple:
-            # pymongo < 2.0
-            database = parsed_uri[1]
+            # pymongo < 2.0
+            database = parsed_uri[1]
         else:
-            # pymongo >= 2.0
-            database = parsed_uri['database']
+            # pymongo >= 2.0
+            database = parsed_uri["database"]

         # Handle auto reconnect signals properly
         while _connection_attempts < 5:
@@ -337,94 +374,96 @@ def setup_backend(self):
         self.database = database

         # setup correct indexes
-        database.tickets.ensure_index([('record_hash', ASCENDING)],
-                                      unique=True)
-        database.tickets.ensure_index([('solved', ASCENDING),
-                                       ('level', ASCENDING)])
-        database.occurrences.ensure_index([('time', DESCENDING)])
+        database.tickets.ensure_index([("record_hash", ASCENDING)], unique=True)
+        database.tickets.ensure_index([("solved", ASCENDING), ("level", ASCENDING)])
+        database.occurrences.ensure_index([("time", DESCENDING)])

     def _order(self, q, order_by):
         from pymongo import ASCENDING, DESCENDING
-        col = '%s' % (order_by[0] == '-' and order_by[1:] or order_by)
-        if order_by[0] == '-':
+
+        col = "%s" % (order_by[0] == "-" and order_by[1:] or order_by)
+        if order_by[0] == "-":
             return q.sort(col, DESCENDING)
         return q.sort(col, ASCENDING)

     def _oid(self, ticket_id):
         from pymongo.objectid import ObjectId
+
         return ObjectId(ticket_id)

     def record_ticket(self, record, data, hash, app_id):
         """Records a log record as ticket."""
         db = self.database
-        ticket = db.tickets.find_one({'record_hash': hash})
+        ticket = db.tickets.find_one({"record_hash": hash})
         if not ticket:
             doc = {
-                'record_hash': hash,
-                'level': record.level,
-                'channel': record.channel or u(''),
-                'location': u('%s:%d') % (record.filename,
-                                          record.lineno),
-                'module': record.module or u(''),
-                'occurrence_count': 0,
-                'solved': False,
-                'app_id': app_id,
+                "record_hash": hash,
+                "level": record.level,
+                "channel": record.channel or "",
+                "location": "%s:%d" % (record.filename, record.lineno),
+                "module": record.module or "",
+                "occurrence_count": 0,
+                "solved": False,
+                "app_id": app_id,
             }
             ticket_id = db.tickets.insert(doc)
         else:
-            ticket_id = ticket['_id']
+            ticket_id = ticket["_id"]

-        db.tickets.update({'_id': ticket_id}, {
-            '$inc': {
-                'occurrence_count': 1
+        db.tickets.update(
+            {"_id": ticket_id},
+            {
+                "$inc": {"occurrence_count": 1},
+                "$set": {"last_occurrence_time": record.time, "solved": False},
             },
-            '$set': {
-                'last_occurrence_time': record.time,
-                'solved': False
-            }
-        })
+        )

         # We store occurrences in a separate collection so that
         # we can make it a capped collection optionally.
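        # A capped collection would be created once, up front, along the
        # lines of (size in bytes, illustrative):
        #
        #     db.create_collection("occurrences", capped=True, size=10 * 1024 * 1024)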
- db.occurrences.insert({ - 'ticket_id': self._oid(ticket_id), - 'app_id': app_id, - 'time': record.time, - 'data': json.dumps(data), - }) + db.occurrences.insert( + { + "ticket_id": self._oid(ticket_id), + "app_id": app_id, + "time": record.time, + "data": json.dumps(data), + } + ) def count_tickets(self): """Returns the number of tickets.""" return self.database.tickets.count() - def get_tickets(self, order_by='-last_occurrence_time', limit=50, - offset=0): + def get_tickets(self, order_by="-last_occurrence_time", limit=50, offset=0): """Selects tickets from the database.""" - query = (self._order(self.database.tickets.find(), order_by) - .limit(limit).skip(offset)) + query = ( + self._order(self.database.tickets.find(), order_by) + .limit(limit) + .skip(offset) + ) return [self._FixedTicketClass(self, obj) for obj in query] def solve_ticket(self, ticket_id): """Marks a ticket as solved.""" - self.database.tickets.update({'_id': self._oid(ticket_id)}, - {'solved': True}) + self.database.tickets.update({"_id": self._oid(ticket_id)}, {"solved": True}) def delete_ticket(self, ticket_id): """Deletes a ticket from the database.""" - self.database.occurrences.remove({'ticket_id': self._oid(ticket_id)}) - self.database.tickets.remove({'_id': self._oid(ticket_id)}) + self.database.occurrences.remove({"ticket_id": self._oid(ticket_id)}) + self.database.tickets.remove({"_id": self._oid(ticket_id)}) def get_ticket(self, ticket_id): """Return a single ticket with all occurrences.""" - ticket = self.database.tickets.find_one({'_id': self._oid(ticket_id)}) + ticket = self.database.tickets.find_one({"_id": self._oid(ticket_id)}) if ticket: return Ticket(self, ticket) - def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0): + def get_occurrences(self, ticket, order_by="-time", limit=50, offset=0): """Selects occurrences from the database for a ticket.""" collection = self.database.occurrences - occurrences = self._order(collection.find( - {'ticket_id': self._oid(ticket)} - ), order_by).limit(limit).skip(offset) + occurrences = ( + self._order(collection.find({"ticket_id": self._oid(ticket)}), order_by) + .limit(limit) + .skip(offset) + ) return [self._FixedOccurrenceClass(self, obj) for obj in occurrences] @@ -443,9 +482,8 @@ def hash_record_raw(self, record): hash = HashingHandlerMixin.hash_record_raw(self, record) if self.hash_salt is not None: hash_salt = self.hash_salt - if not PY2 or isinstance(hash_salt, unicode): - hash_salt = hash_salt.encode('utf-8') - hash.update(b('\x00') + hash_salt) + hash_salt = hash_salt.encode("utf-8") + hash.update(b"\x00" + hash_salt) return hash @@ -473,15 +511,23 @@ class TicketingHandler(TicketingBaseHandler): #: :class:`SQLAlchemyBackend`. 
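    #: Any other :class:`BackendBase` subclass (for example
    #: :class:`MongoDBBackend`) can be passed to the constructor via the
    #: ``backend`` argument instead.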
default_backend = SQLAlchemyBackend - def __init__(self, uri, app_id='generic', level=NOTSET, - filter=None, bubble=False, hash_salt=None, backend=None, - **db_options): + def __init__( + self, + uri, + app_id="generic", + level=NOTSET, + filter=None, + bubble=False, + hash_salt=None, + backend=None, + **db_options, + ): if hash_salt is None: - hash_salt = u('apphash-') + app_id + hash_salt = "apphash-" + app_id TicketingBaseHandler.__init__(self, hash_salt, level, filter, bubble) if backend is None: backend = self.default_backend - db_options['uri'] = uri + db_options["uri"] = uri self.set_backend(backend, **db_options) self.app_id = app_id @@ -502,6 +548,6 @@ def record_ticket(self, record, data, hash): def emit(self, record): """Emits a single record and writes it to the database.""" - hash = self.hash_record(record).encode('utf-8') + hash = self.hash_record(record).encode("utf-8") data = self.process_record(record, hash) self.record_ticket(record, data, hash) diff --git a/logbook/utils.py b/src/logbook/utils.py similarity index 78% rename from logbook/utils.py rename to src/logbook/utils.py index 21df7cc4..db79f258 100644 --- a/logbook/utils.py +++ b/src/logbook/utils.py @@ -1,14 +1,12 @@ -from contextlib import contextmanager import functools import sys import threading +from contextlib import contextmanager -from .base import Logger, DEBUG -from .helpers import string_types - +from .base import DEBUG, Logger -class _SlowContextNotifier(object): +class _SlowContextNotifier: def __init__(self, threshold, func): self.timer = threading.Timer(threshold, func) @@ -20,7 +18,7 @@ def __exit__(self, *_): self.timer.cancel() -_slow_logger = Logger('Slow') +_slow_logger = Logger("Slow") def logged_if_slow(*args, **kwargs): @@ -37,16 +35,18 @@ def logged_if_slow(*args, **kwargs): The remaining parameters are passed to the :meth:`~logbook.base.LoggerMixin.log` method. """ - threshold = kwargs.pop('threshold', 1) - func = kwargs.pop('func', None) + threshold = kwargs.pop("threshold", 1) + func = kwargs.pop("func", None) if func is None: - logger = kwargs.pop('logger', _slow_logger) - level = kwargs.pop('level', DEBUG) + logger = kwargs.pop("logger", _slow_logger) + level = kwargs.pop("level", DEBUG) func = functools.partial(logger.log, level, *args, **kwargs) else: - if 'logger' in kwargs or 'level' in kwargs: - raise TypeError("If using deprecated func parameter, 'logger' and" - " 'level' arguments cannot be passed.") + if "logger" in kwargs or "level" in kwargs: + raise TypeError( + "If using deprecated func parameter, 'logger' and" + " 'level' arguments cannot be passed." 
+ ) func = functools.partial(func, *args, **kwargs) return _SlowContextNotifier(threshold, func) @@ -55,6 +55,7 @@ def logged_if_slow(*args, **kwargs): class _Local(threading.local): enabled = True + _local = _Local() @@ -86,20 +87,21 @@ def forget_deprecation_locations(): def _write_deprecations_if_needed(message, frame_correction): if not _local.enabled: return - caller_location = _get_caller_location(frame_correction=frame_correction+1) + caller_location = _get_caller_location(frame_correction=frame_correction + 1) if caller_location not in _deprecation_locations: - _deprecation_logger.warning(message, frame_correction=frame_correction+1) + _deprecation_logger.warning(message, frame_correction=frame_correction + 1) _deprecation_locations.add(caller_location) def log_deprecation_message(message, frame_correction=0): - _write_deprecations_if_needed("Deprecation message: {0}".format(message), frame_correction=frame_correction+1) - + _write_deprecations_if_needed( + f"Deprecation message: {message}", frame_correction=frame_correction + 1 + ) -class _DeprecatedFunction(object): +class _DeprecatedFunction: def __init__(self, func, message, obj=None, objtype=None): - super(_DeprecatedFunction, self).__init__() + super().__init__() self._func = func self._message = message self._obj = obj @@ -108,7 +110,7 @@ def __init__(self, func, message, obj=None, objtype=None): def _get_underlying_func(self): returned = self._func if isinstance(returned, classmethod): - if hasattr(returned, '__func__'): + if hasattr(returned, "__func__"): returned = returned.__func__ else: returned = returned.__get__(self._objtype).__func__ @@ -116,9 +118,9 @@ def _get_underlying_func(self): def __call__(self, *args, **kwargs): func = self._get_underlying_func() - warning = "{0} is deprecated.".format(self._get_func_str()) + warning = f"{self._get_func_str()} is deprecated." if self._message is not None: - warning += " {0}".format(self._message) + warning += f" {self._message}" _write_deprecations_if_needed(warning, frame_correction=+1) if self._obj is not None: return func(self._obj, *args, **kwargs) @@ -129,15 +131,14 @@ def __call__(self, *args, **kwargs): def _get_func_str(self): func = self._get_underlying_func() if self._objtype is not None: - return '{0}.{1}'.format(self._objtype.__name__, func.__name__) - return '{0}.{1}'.format(func.__module__, func.__name__) + return f"{self._objtype.__name__}.{func.__name__}" + return f"{func.__module__}.{func.__name__}" def __get__(self, obj, objtype): return self.bound_to(obj, objtype) def bound_to(self, obj, objtype): - return _DeprecatedFunction(self._func, self._message, obj=obj, - objtype=objtype) + return _DeprecatedFunction(self._func, self._message, obj=obj, objtype=objtype) @property def __name__(self): @@ -149,8 +150,7 @@ def __doc__(self): if returned: # pylint: disable=no-member returned += "\n.. deprecated\n" # pylint: disable=no-member if self._message: - returned += " {0}".format( - self._message) # pylint: disable=no-member + returned += f" {self._message}" # pylint: disable=no-member return returned @__doc__.setter @@ -171,7 +171,7 @@ def deprecated(func=None, message=None): .. 
versionadded:: 0.12 """ - if isinstance(func, string_types): + if isinstance(func, str): assert message is None message = func func = None diff --git a/tests/conftest.py b/tests/conftest.py index 0a12dd59..85c049cf 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,19 +1,19 @@ -import sys +from pathlib import Path -import logbook import pytest +import logbook + logbook.StderrHandler().push_application() @pytest.fixture def logger(): - return logbook.Logger('testlogger') + return logbook.Logger("testlogger") @pytest.fixture def active_handler(request, test_handler, activation_strategy): - s = activation_strategy(test_handler) s.activate() @@ -29,10 +29,9 @@ def test_handler(): return logbook.TestHandler() -class ActivationStrategy(object): - +class ActivationStrategy: def __init__(self, handler): - super(ActivationStrategy, self).__init__() + super().__init__() self.handler = handler def activate(self): @@ -50,7 +49,6 @@ def __exit__(self, *_): class ContextEnteringStrategy(ActivationStrategy): - def activate(self): self.handler.__enter__() @@ -59,9 +57,9 @@ def deactivate(self): class PushingStrategy(ActivationStrategy): - def activate(self): from logbook.concurrency import is_gevent_enabled + if is_gevent_enabled(): self.handler.push_greenlet() else: @@ -69,6 +67,7 @@ def activate(self): def deactivate(self): from logbook.concurrency import is_gevent_enabled + if is_gevent_enabled(): self.handler.pop_greenlet() else: @@ -80,9 +79,18 @@ def activation_strategy(request): return request.param -@pytest.fixture -def logfile(tmpdir): - return str(tmpdir.join('logfile.log')) +class CustomPathLike: + def __init__(self, path): + self.path = path + + def __fspath__(self): + return self.path + + +@pytest.fixture(params=[Path, str, CustomPathLike]) +def logfile(tmp_path, request): + path = str(tmp_path / "logfile.log") + return request.param(path) @pytest.fixture @@ -92,26 +100,26 @@ def default_handler(request): request.addfinalizer(returned.pop_application) return returned + try: import gevent except ImportError: pass else: - @pytest.fixture(scope="module", autouse=True, params=[False, True]) + + @pytest.fixture( + scope="module", autouse=True, params=[False, True], ids=["nogevent", "gevent"] + ) def gevent(request): - module_name = getattr(request.module, '__name__', '') - if (not any(s in module_name for s in ('queues', 'processors')) - and request.param): - from logbook.concurrency import enable_gevent, _disable_gevent + module_name = getattr(request.module, "__name__", "") + if ( + not any(s in module_name for s in ("queues", "processors")) + and request.param + ): + from logbook.concurrency import _disable_gevent, enable_gevent + enable_gevent() @request.addfinalizer def fin(): _disable_gevent() - - -def pytest_ignore_collect(path, config): - if 'test_asyncio.py' in path.basename and (sys.version_info.major < 3 or sys.version_info.minor < 5): - return True - - return False diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py index 469554ba..d1367284 100644 --- a/tests/test_asyncio.py +++ b/tests/test_asyncio.py @@ -1,12 +1,10 @@ -import pytest -import logbook import asyncio -from logbook.concurrency import has_contextvars + +import logbook ITERATIONS = 100 -@pytest.mark.skipif(not has_contextvars, reason="Contexvars not available") def test_asyncio_context_management(logger): h1 = logbook.TestHandler() h2 = logbook.TestHandler() @@ -18,10 +16,12 @@ async def task(handler, msg): await asyncio.sleep(0) # allow for context switch - 
asyncio.get_event_loop().run_until_complete(asyncio.gather(task(h1, 'task1'), task(h2, 'task2'))) + asyncio.get_event_loop().run_until_complete( + asyncio.gather(task(h1, "task1"), task(h2, "task2")) + ) assert len(h1.records) == ITERATIONS - assert all(['task1' == r.msg for r in h1.records]) + assert all(["task1" == r.msg for r in h1.records]) assert len(h2.records) == ITERATIONS - assert all(['task2' == r.msg for r in h2.records]) + assert all(["task2" == r.msg for r in h2.records]) diff --git a/tests/test_ci.py b/tests/test_ci.py deleted file mode 100644 index 54861638..00000000 --- a/tests/test_ci.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -import os - -import pytest - -from .utils import appveyor, travis - -@appveyor -def test_appveyor_speedups(): - if os.environ.get('CYBUILD'): - import logbook._speedups - else: - with pytest.raises(ImportError): - import logbook._speedups - -@travis -def test_travis_speedups(): - if os.environ.get('CYBUILD'): - import logbook._speedups - else: - with pytest.raises(ImportError): - import logbook._speedups diff --git a/tests/test_deadlock.py b/tests/test_deadlock.py index 66e4ad19..037b349b 100644 --- a/tests/test_deadlock.py +++ b/tests/test_deadlock.py @@ -1,8 +1,9 @@ import sys + import logbook -class MyObject(object): +class MyObject: def __init__(self, logger_func): self._logger_func = logger_func @@ -11,7 +12,7 @@ def __str__(self): return "" -class FakeLock(object): +class FakeLock: def __init__(self): self._acquired = False self._deadlock_occurred = False @@ -28,8 +29,7 @@ def release(self): def test_deadlock_in_emit(): logbook_logger = logbook.Logger("logbook") obj = MyObject(logbook_logger.info) - stream_handler = logbook.StreamHandler(stream=sys.stderr, - level=logbook.DEBUG) + stream_handler = logbook.StreamHandler(stream=sys.stderr, level=logbook.DEBUG) stream_handler.lock = FakeLock() with stream_handler.applicationbound(): logbook_logger.info("format this: {}", obj) diff --git a/tests/test_file_handler.py b/tests/test_file_handler.py index 9585832e..70ea2c47 100644 --- a/tests/test_file_handler.py +++ b/tests/test_file_handler.py @@ -1,242 +1,246 @@ +import gzip import os -import pytest import time from datetime import datetime -import logbook -from logbook.helpers import u, xrange -import gzip import brotli -from .utils import capturing_stderr_context, LETTERS +import pytest + +import logbook + +from .utils import LETTERS, capturing_stderr_context def test_file_handler(logfile, activation_strategy, logger): handler = logbook.FileHandler( logfile, - format_string='{record.level_name}:{record.channel}:{record.message}',) + format_string="{record.level_name}:{record.channel}:{record.message}", + ) with activation_strategy(handler): - logger.warn('warning message') + logger.warn("warning message") handler.close() with open(logfile) as f: - assert f.readline() == 'WARNING:testlogger:warning message\n' + assert f.readline() == "WARNING:testlogger:warning message\n" def test_file_handler_unicode(logfile, activation_strategy, logger): with capturing_stderr_context() as captured: with activation_strategy(logbook.FileHandler(logfile)): - logger.info(u('\u0431')) - assert (not captured.getvalue()) + logger.info("\u0431") + assert not captured.getvalue() def test_file_handler_delay(logfile, activation_strategy, logger): handler = logbook.FileHandler( logfile, - format_string='{record.level_name}:{record.channel}:{record.message}', - delay=True) - assert (not os.path.isfile(logfile)) + 
format_string="{record.level_name}:{record.channel}:{record.message}", + delay=True, + ) + assert not os.path.isfile(logfile) with activation_strategy(handler): - logger.warn('warning message') + logger.warn("warning message") handler.close() with open(logfile) as f: - assert f.readline() == 'WARNING:testlogger:warning message\n' + assert f.readline() == "WARNING:testlogger:warning message\n" def test_monitoring_file_handler(logfile, activation_strategy, logger): - if os.name == 'nt': - pytest.skip( - 'unsupported on windows due to different IO (also unneeded)') + if os.name == "nt": + pytest.skip("unsupported on windows due to different IO (also unneeded)") handler = logbook.MonitoringFileHandler( logfile, - format_string='{record.level_name}:{record.channel}:{record.message}', - delay=True) + format_string="{record.level_name}:{record.channel}:{record.message}", + delay=True, + ) with activation_strategy(handler): - logger.warn('warning message') - os.rename(logfile, logfile + '.old') - logger.warn('another warning message') + logger.warn("warning message") + os.rename(logfile, os.fspath(logfile) + ".old") + logger.warn("another warning message") handler.close() with open(logfile) as f: - assert f.read().strip() == 'WARNING:testlogger:another warning message' + assert f.read().strip() == "WARNING:testlogger:another warning message" def test_custom_formatter(activation_strategy, logfile, logger): def custom_format(record, handler): - return record.level_name + ':' + record.message + return record.level_name + ":" + record.message handler = logbook.FileHandler(logfile) with activation_strategy(handler): handler.formatter = custom_format - logger.warn('Custom formatters are awesome') + logger.warn("Custom formatters are awesome") with open(logfile) as f: - assert f.readline() == 'WARNING:Custom formatters are awesome\n' + assert f.readline() == "WARNING:Custom formatters are awesome\n" def test_rotating_file_handler(logfile, activation_strategy, logger): basename = os.path.basename(logfile) - handler = logbook.RotatingFileHandler(logfile, max_size=2048, - backup_count=3, - ) - handler.format_string = '{record.message}' + handler = logbook.RotatingFileHandler( + logfile, + max_size=2048, + backup_count=3, + ) + handler.format_string = "{record.message}" with activation_strategy(handler): - for c, x in zip(LETTERS, xrange(32)): + for c, x in zip(LETTERS, range(32)): logger.warn(c * 256) - files = [x for x in os.listdir(os.path.dirname(logfile)) - if x.startswith(basename)] + files = [x for x in os.listdir(os.path.dirname(logfile)) if x.startswith(basename)] files.sort() - assert files == [basename, basename + - '.1', basename + '.2', basename + '.3'] + assert files == [basename, basename + ".1", basename + ".2", basename + ".3"] with open(logfile) as f: - assert f.readline().rstrip() == ('C' * 256) - assert f.readline().rstrip() == ('D' * 256) - assert f.readline().rstrip() == ('E' * 256) - assert f.readline().rstrip() == ('F' * 256) + assert f.readline().rstrip() == ("C" * 256) + assert f.readline().rstrip() == ("D" * 256) + assert f.readline().rstrip() == ("E" * 256) + assert f.readline().rstrip() == ("F" * 256) @pytest.mark.parametrize("backup_count", [1, 3]) def test_timed_rotating_file_handler(tmpdir, activation_strategy, backup_count): - basename = str(tmpdir.join('trot.log')) - handler = logbook.TimedRotatingFileHandler( - basename, backup_count=backup_count) - handler.format_string = '[{record.time:%H:%M}] {record.message}' + basename = str(tmpdir.join("trot.log")) + handler = 
logbook.TimedRotatingFileHandler(basename, backup_count=backup_count) + handler.format_string = "[{record.time:%H:%M}] {record.message}" - def fake_record(message, year, month, day, hour=0, - minute=0, second=0): - lr = logbook.LogRecord('Test Logger', logbook.WARNING, - message) + def fake_record(message, year, month, day, hour=0, minute=0, second=0): + lr = logbook.LogRecord("Test Logger", logbook.WARNING, message) lr.time = datetime(year, month, day, hour, minute, second) return lr with activation_strategy(handler): - for x in xrange(10): - handler.handle(fake_record('First One', 2010, 1, 5, x + 1)) - for x in xrange(20): - handler.handle(fake_record('Second One', 2010, 1, 6, x + 1)) - for x in xrange(10): - handler.handle(fake_record('Third One', 2010, 1, 7, x + 1)) - for x in xrange(20): - handler.handle(fake_record('Last One', 2010, 1, 8, x + 1)) - - files = sorted(x for x in os.listdir(str(tmpdir)) if x.startswith('trot')) - - assert files == ['trot-2010-01-0{0}.log'.format(i) - for i in xrange(5, 9)][-backup_count:] - with open(str(tmpdir.join('trot-2010-01-08.log'))) as f: - assert f.readline().rstrip() == '[01:00] Last One' - assert f.readline().rstrip() == '[02:00] Last One' + for x in range(10): + handler.handle(fake_record("First One", 2010, 1, 5, x + 1)) + for x in range(20): + handler.handle(fake_record("Second One", 2010, 1, 6, x + 1)) + for x in range(10): + handler.handle(fake_record("Third One", 2010, 1, 7, x + 1)) + for x in range(20): + handler.handle(fake_record("Last One", 2010, 1, 8, x + 1)) + + files = sorted(x for x in os.listdir(str(tmpdir)) if x.startswith("trot")) + + assert files == [f"trot-2010-01-0{i}.log" for i in range(5, 9)][-backup_count:] + with open(str(tmpdir.join("trot-2010-01-08.log"))) as f: + assert f.readline().rstrip() == "[01:00] Last One" + assert f.readline().rstrip() == "[02:00] Last One" if backup_count > 1: - with open(str(tmpdir.join('trot-2010-01-07.log'))) as f: - assert f.readline().rstrip() == '[01:00] Third One' - assert f.readline().rstrip() == '[02:00] Third One' + with open(str(tmpdir.join("trot-2010-01-07.log"))) as f: + assert f.readline().rstrip() == "[01:00] Third One" + assert f.readline().rstrip() == "[02:00] Third One" + @pytest.mark.parametrize("backup_count", [1, 3]) -def test_timed_rotating_file_handler__rollover_format(tmpdir, activation_strategy, backup_count): - basename = str(tmpdir.join('trot.log')) +def test_timed_rotating_file_handler__rollover_format( + tmpdir, activation_strategy, backup_count +): + basename = str(tmpdir.join("trot.log")) handler = logbook.TimedRotatingFileHandler( - basename, backup_count=backup_count, - rollover_format='{basename}{ext}.{timestamp}', + basename, + backup_count=backup_count, + rollover_format="{basename}{ext}.{timestamp}", ) - handler.format_string = '[{record.time:%H:%M}] {record.message}' + handler.format_string = "[{record.time:%H:%M}] {record.message}" - def fake_record(message, year, month, day, hour=0, - minute=0, second=0): - lr = logbook.LogRecord('Test Logger', logbook.WARNING, - message) + def fake_record(message, year, month, day, hour=0, minute=0, second=0): + lr = logbook.LogRecord("Test Logger", logbook.WARNING, message) lr.time = datetime(year, month, day, hour, minute, second) return lr with activation_strategy(handler): - for x in xrange(10): - handler.handle(fake_record('First One', 2010, 1, 5, x + 1)) - for x in xrange(20): - handler.handle(fake_record('Second One', 2010, 1, 6, x + 1)) - for x in xrange(10): - handler.handle(fake_record('Third One', 2010, 1, 
7, x + 1)) - for x in xrange(20): - handler.handle(fake_record('Last One', 2010, 1, 8, x + 1)) - - files = sorted(x for x in os.listdir(str(tmpdir)) if x.startswith('trot')) - - assert files == ['trot.log.2010-01-0{0}'.format(i) - for i in xrange(5, 9)][-backup_count:] - with open(str(tmpdir.join('trot.log.2010-01-08'))) as f: - assert f.readline().rstrip() == '[01:00] Last One' - assert f.readline().rstrip() == '[02:00] Last One' + for x in range(10): + handler.handle(fake_record("First One", 2010, 1, 5, x + 1)) + for x in range(20): + handler.handle(fake_record("Second One", 2010, 1, 6, x + 1)) + for x in range(10): + handler.handle(fake_record("Third One", 2010, 1, 7, x + 1)) + for x in range(20): + handler.handle(fake_record("Last One", 2010, 1, 8, x + 1)) + + files = sorted(x for x in os.listdir(str(tmpdir)) if x.startswith("trot")) + + assert files == [f"trot.log.2010-01-0{i}" for i in range(5, 9)][-backup_count:] + with open(str(tmpdir.join("trot.log.2010-01-08"))) as f: + assert f.readline().rstrip() == "[01:00] Last One" + assert f.readline().rstrip() == "[02:00] Last One" if backup_count > 1: - with open(str(tmpdir.join('trot.log.2010-01-07'))) as f: - assert f.readline().rstrip() == '[01:00] Third One' - assert f.readline().rstrip() == '[02:00] Third One' + with open(str(tmpdir.join("trot.log.2010-01-07"))) as f: + assert f.readline().rstrip() == "[01:00] Third One" + assert f.readline().rstrip() == "[02:00] Third One" @pytest.mark.parametrize("backup_count", [1, 3]) @pytest.mark.parametrize("preexisting_file", [True, False]) def test_timed_rotating_file_handler__not_timed_filename_for_current( - tmpdir, activation_strategy, backup_count, preexisting_file + tmpdir, activation_strategy, backup_count, preexisting_file ): - basename = str(tmpdir.join('trot.log')) + basename = str(tmpdir.join("trot.log")) if preexisting_file: - with open(basename, 'w') as file: - file.write('contents') + with open(basename, "w") as file: + file.write("contents") jan_first = time.mktime(datetime(2010, 1, 1).timetuple()) os.utime(basename, (jan_first, jan_first)) handler = logbook.TimedRotatingFileHandler( basename, - format_string='[{record.time:%H:%M}] {record.message}', + format_string="[{record.time:%H:%M}] {record.message}", backup_count=backup_count, - rollover_format='{basename}{ext}.{timestamp}', + rollover_format="{basename}{ext}.{timestamp}", timed_filename_for_current=False, ) - def fake_record(message, year, month, day, hour=0, - minute=0, second=0): - lr = logbook.LogRecord('Test Logger', logbook.WARNING, - message) + def fake_record(message, year, month, day, hour=0, minute=0, second=0): + lr = logbook.LogRecord("Test Logger", logbook.WARNING, message) lr.time = datetime(year, month, day, hour, minute, second) return lr with activation_strategy(handler): - for x in xrange(10): - handler.handle(fake_record('First One', 2010, 1, 5, x + 1)) - for x in xrange(20): - handler.handle(fake_record('Second One', 2010, 1, 6, x + 1)) - for x in xrange(10): - handler.handle(fake_record('Third One', 2010, 1, 7, x + 1)) - for x in xrange(20): - handler.handle(fake_record('Last One', 2010, 1, 8, x + 1)) - - computed_files = [x for x in os.listdir(str(tmpdir)) if x.startswith('trot')] - - expected_files = ['trot.log.2010-01-01'] if preexisting_file else [] - expected_files += ['trot.log.2010-01-0{0}'.format(i) for i in xrange(5, 8)] - expected_files += ['trot.log'] + for x in range(10): + handler.handle(fake_record("First One", 2010, 1, 5, x + 1)) + for x in range(20): + 
handler.handle(fake_record("Second One", 2010, 1, 6, x + 1)) + for x in range(10): + handler.handle(fake_record("Third One", 2010, 1, 7, x + 1)) + for x in range(20): + handler.handle(fake_record("Last One", 2010, 1, 8, x + 1)) + + computed_files = [x for x in os.listdir(str(tmpdir)) if x.startswith("trot")] + + expected_files = ["trot.log.2010-01-01"] if preexisting_file else [] + expected_files += [f"trot.log.2010-01-0{i}" for i in range(5, 8)] + expected_files += ["trot.log"] expected_files = expected_files[-backup_count:] assert sorted(computed_files) == sorted(expected_files) - with open(str(tmpdir.join('trot.log'))) as f: - assert f.readline().rstrip() == '[01:00] Last One' - assert f.readline().rstrip() == '[02:00] Last One' + with open(str(tmpdir.join("trot.log"))) as f: + assert f.readline().rstrip() == "[01:00] Last One" + assert f.readline().rstrip() == "[02:00] Last One" if backup_count > 1: - with open(str(tmpdir.join('trot.log.2010-01-07'))) as f: - assert f.readline().rstrip() == '[01:00] Third One' - assert f.readline().rstrip() == '[02:00] Third One' + with open(str(tmpdir.join("trot.log.2010-01-07"))) as f: + assert f.readline().rstrip() == "[01:00] Third One" + assert f.readline().rstrip() == "[02:00] Third One" + def _decompress(input_file_name, use_gzip=True): if use_gzip: - with gzip.open(input_file_name, 'rb') as in_f: + with gzip.open(input_file_name, "rb") as in_f: return in_f.read().decode() else: - with open(input_file_name, 'rb') as in_f: + with open(input_file_name, "rb") as in_f: return brotli.decompress(in_f.read()).decode() + @pytest.mark.parametrize("use_gzip", [True, False]) def test_compression_file_handler(logfile, activation_strategy, logger, use_gzip): - handler = logbook.GZIPCompressionHandler(logfile) if use_gzip else logbook.BrotliCompressionHandler(logfile) - handler.format_string = '{record.level_name}:{record.channel}:{record.message}' + handler = ( + logbook.GZIPCompressionHandler(logfile) + if use_gzip + else logbook.BrotliCompressionHandler(logfile) + ) + handler.format_string = "{record.level_name}:{record.channel}:{record.message}" with activation_strategy(handler): - logger.warn('warning message') + logger.warn("warning message") handler.close() - assert _decompress(logfile, use_gzip) == 'WARNING:testlogger:warning message\n' + assert _decompress(logfile, use_gzip) == "WARNING:testlogger:warning message\n" diff --git a/tests/test_fingers_crossed_handler.py b/tests/test_fingers_crossed_handler.py index 49f48820..977e324b 100644 --- a/tests/test_fingers_crossed_handler.py +++ b/tests/test_fingers_crossed_handler.py @@ -4,26 +4,25 @@ def test_fingerscrossed(activation_strategy, logger, default_handler): - handler = logbook.FingersCrossedHandler(default_handler, - logbook.WARNING) + handler = logbook.FingersCrossedHandler(default_handler, logbook.WARNING) # if no warning occurs, the infos are not logged with activation_strategy(handler): with capturing_stderr_context() as captured: - logger.info('some info') - assert captured.getvalue() == '' - assert (not handler.triggered) + logger.info("some info") + assert captured.getvalue() == "" + assert not handler.triggered # but if it does, all log messages are output with activation_strategy(handler): with capturing_stderr_context() as captured: - logger.info('some info') - logger.warning('something happened') - logger.info('something else happened') + logger.info("some info") + logger.warning("something happened") + logger.info("something else happened") logs = captured.getvalue() - assert 'some 
info' in logs - assert 'something happened' in logs - assert 'something else happened' in logs + assert "some info" in logs + assert "something happened" in logs + assert "something else happened" in logs assert handler.triggered @@ -36,42 +35,43 @@ def handler_factory(record, fch): return handler def make_fch(): - return logbook.FingersCrossedHandler(handler_factory, - logbook.WARNING) + return logbook.FingersCrossedHandler(handler_factory, logbook.WARNING) fch = make_fch() with activation_strategy(fch): - logger.info('some info') + logger.info("some info") assert len(handlers) == 0 - logger.warning('a warning') + logger.warning("a warning") assert len(handlers) == 1 - logger.error('an error') + logger.error("an error") assert len(handlers) == 1 assert handlers[0].has_infos assert handlers[0].has_warnings assert handlers[0].has_errors - assert (not handlers[0].has_notices) - assert (not handlers[0].has_criticals) - assert (not handlers[0].has_debugs) + assert not handlers[0].has_notices + assert not handlers[0].has_criticals + assert not handlers[0].has_debugs fch = make_fch() with activation_strategy(fch): - logger.info('some info') - logger.warning('a warning') + logger.info("some info") + logger.warning("a warning") assert len(handlers) == 2 def test_fingerscrossed_buffer_size(activation_strategy): - logger = logbook.Logger('Test') + logger = logbook.Logger("Test") test_handler = logbook.TestHandler() handler = logbook.FingersCrossedHandler(test_handler, buffer_size=3) with activation_strategy(handler): - logger.info('Never gonna give you up') - logger.warn('Aha!') - logger.warn('Moar!') - logger.error('Pure hate!') + logger.info("Never gonna give you up") + logger.warn("Aha!") + logger.warn("Moar!") + logger.error("Pure hate!") - assert test_handler.formatted_records == ['[WARNING] Test: Aha!', - '[WARNING] Test: Moar!', - '[ERROR] Test: Pure hate!'] + assert test_handler.formatted_records == [ + "[WARNING] Test: Aha!", + "[WARNING] Test: Moar!", + "[ERROR] Test: Pure hate!", + ] diff --git a/tests/test_flags.py b/tests/test_flags.py index eb0bf592..f7c5fe02 100644 --- a/tests/test_flags.py +++ b/tests/test_flags.py @@ -1,33 +1,32 @@ -import logbook - import pytest +import logbook + from .utils import capturing_stderr_context def test_error_flag(logger): with capturing_stderr_context() as captured: - with logbook.Flags(errors='print'): - with logbook.Flags(errors='silent'): - logger.warn('Foo {42}', 'aha') - assert captured.getvalue() == '' + with logbook.Flags(errors="print"): + with logbook.Flags(errors="silent"): + logger.warn("Foo {42}", "aha") + assert captured.getvalue() == "" - with logbook.Flags(errors='silent'): - with logbook.Flags(errors='print'): - logger.warn('Foo {42}', 'aha') - assert captured.getvalue() != '' + with logbook.Flags(errors="silent"): + with logbook.Flags(errors="print"): + logger.warn("Foo {42}", "aha") + assert captured.getvalue() != "" with pytest.raises(Exception) as caught: - with logbook.Flags(errors='raise'): - logger.warn('Foo {42}', 'aha') - assert 'Could not format message with provided arguments' in str( - caught.value) + with logbook.Flags(errors="raise"): + logger.warn("Foo {42}", "aha") + assert "Could not format message with provided arguments" in str(caught.value) def test_disable_introspection(logger): with logbook.Flags(introspection=False): with logbook.TestHandler() as h: - logger.warn('Testing') + logger.warn("Testing") assert h.records[0].frame is None assert h.records[0].calling_frame is None assert h.records[0].module is None diff 
--git a/tests/test_groups.py b/tests/test_groups.py index c10960d4..6f01e466 100644 --- a/tests/test_groups.py +++ b/tests/test_groups.py @@ -3,22 +3,23 @@ def test_groups(logger): def inject_extra(record): - record.extra['foo'] = 'bar' + record.extra["foo"] = "bar" + group = logbook.LoggerGroup(processor=inject_extra) group.level = logbook.ERROR group.add_logger(logger) with logbook.TestHandler() as handler: - logger.warn('A warning') - logger.error('An error') - assert (not handler.has_warning('A warning')) - assert handler.has_error('An error') - assert handler.records[0].extra['foo'] == 'bar' + logger.warn("A warning") + logger.error("An error") + assert not handler.has_warning("A warning") + assert handler.has_error("An error") + assert handler.records[0].extra["foo"] == "bar" def test_group_disabled(): group = logbook.LoggerGroup() - logger1 = logbook.Logger('testlogger1') - logger2 = logbook.Logger('testlogger2') + logger1 = logbook.Logger("testlogger1") + logger2 = logbook.Logger("testlogger2") group.add_logger(logger1) group.add_logger(logger2) @@ -28,8 +29,8 @@ def test_group_disabled(): group.disable() with logbook.TestHandler() as handler: - logger1.warn('Warning 1') - logger2.warn('Warning 2') + logger1.warn("Warning 1") + logger2.warn("Warning 2") assert not handler.has_warnings @@ -38,11 +39,11 @@ def test_group_disabled(): group.enable() with logbook.TestHandler() as handler: - logger1.warn('Warning 1') - logger2.warn('Warning 2') + logger1.warn("Warning 1") + logger2.warn("Warning 2") - assert handler.has_warning('Warning 1') - assert handler.has_warning('Warning 2') + assert handler.has_warning("Warning 1") + assert handler.has_warning("Warning 2") # Test group disabled, but logger explicitly enabled @@ -51,41 +52,41 @@ def test_group_disabled(): logger1.enable() with logbook.TestHandler() as handler: - logger1.warn('Warning 1') - logger2.warn('Warning 2') + logger1.warn("Warning 1") + logger2.warn("Warning 2") - assert handler.has_warning('Warning 1') - assert not handler.has_warning('Warning 2') + assert handler.has_warning("Warning 1") + assert not handler.has_warning("Warning 2") # Logger 1 will be enabled by using force=True group.disable(force=True) with logbook.TestHandler() as handler: - logger1.warn('Warning 1') - logger2.warn('Warning 2') + logger1.warn("Warning 1") + logger2.warn("Warning 2") - assert not handler.has_warning('Warning 1') - assert not handler.has_warning('Warning 2') + assert not handler.has_warning("Warning 1") + assert not handler.has_warning("Warning 2") # Enabling without force means logger 1 will still be disabled. group.enable() with logbook.TestHandler() as handler: - logger1.warn('Warning 1') - logger2.warn('Warning 2') + logger1.warn("Warning 1") + logger2.warn("Warning 2") - assert not handler.has_warning('Warning 1') - assert handler.has_warning('Warning 2') + assert not handler.has_warning("Warning 1") + assert handler.has_warning("Warning 2") # Force logger 1 enabled. 
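
# Aside: a minimal standalone sketch of the group-level gating this test
# exercises. Assumption (from the assertions above, not stated elsewhere):
# disable()/enable() without force respect a logger's own override, while
# force=True resets the per-logger flag as well. Uses only the public
# logbook API shown in this diff.
import logbook

group = logbook.LoggerGroup()
log = logbook.Logger("sketch")
group.add_logger(log)

group.disable()  # group-wide off: records from member loggers are dropped
log.enable()     # per-logger override wins over the group setting
with logbook.TestHandler() as handler:
    log.warn("visible")
assert handler.has_warning("visible")

group.disable(force=True)  # force also clears the per-logger override
with logbook.TestHandler() as handler:
    log.warn("hidden")
assert not handler.has_warning("hidden")
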
group.enable(force=True) with logbook.TestHandler() as handler: - logger1.warn('Warning 1') - logger2.warn('Warning 2') + logger1.warn("Warning 1") + logger2.warn("Warning 2") - assert handler.has_warning('Warning 1') - assert handler.has_warning('Warning 2') + assert handler.has_warning("Warning 1") + assert handler.has_warning("Warning 2") diff --git a/tests/test_handler_errors.py b/tests/test_handler_errors.py index 8095083e..a2ba0992 100644 --- a/tests/test_handler_errors.py +++ b/tests/test_handler_errors.py @@ -1,48 +1,55 @@ import re import sys -import logbook - import pytest +import logbook + from .utils import capturing_stderr_context __file_without_pyc__ = __file__ -if __file_without_pyc__.endswith('.pyc'): +if __file_without_pyc__.endswith(".pyc"): __file_without_pyc__ = __file_without_pyc__[:-1] def test_handler_exception(activation_strategy, logger): class ErroringHandler(logbook.TestHandler): - def emit(self, record): - raise RuntimeError('something bad happened') + raise RuntimeError("something bad happened") with capturing_stderr_context() as stderr: with activation_strategy(ErroringHandler()): - logger.warn('I warn you.') - assert 'something bad happened' in stderr.getvalue() - assert 'I warn you' not in stderr.getvalue() + logger.warn("I warn you.") + assert "something bad happened" in stderr.getvalue() + assert "I warn you" not in stderr.getvalue() def test_formatting_exception(): def make_record(): - return logbook.LogRecord('Test Logger', logbook.WARNING, - 'Hello {foo:invalid}', - kwargs={'foo': 42}, - frame=sys._getframe()) + return logbook.LogRecord( + "Test Logger", + logbook.WARNING, + "Hello {foo:invalid}", + kwargs={"foo": 42}, + frame=sys._getframe(), + ) + record = make_record() with pytest.raises(TypeError) as caught: record.message errormsg = str(caught.value) assert re.search( - 'Could not format message with provided arguments: Invalid ' - '(?:format specifier)|(?:conversion specification)|(?:format spec)', - errormsg, re.M | re.S) + "Could not format message with provided arguments: Invalid " + "(?:format specifier)|(?:conversion specification)|(?:format spec)", + errormsg, + re.M | re.S, + ) assert "msg='Hello {foo:invalid}'" in errormsg - assert 'args=()' in errormsg + assert "args=()" in errormsg assert "kwargs={'foo': 42}" in errormsg assert re.search( - r'Happened in file .*%s, line \d+' % re.escape(__file_without_pyc__), - errormsg, re.M | re.S) + r"Happened in file .*%s, line \d+" % re.escape(__file_without_pyc__), + errormsg, + re.M | re.S, + ) diff --git a/tests/test_handlers.py b/tests/test_handlers.py index 4de4c61b..04502dce 100644 --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -4,32 +4,34 @@ def test_custom_logger(activation_strategy, logger): - client_ip = '127.0.0.1' + client_ip = "127.0.0.1" class CustomLogger(logbook.Logger): - def process_record(self, record): - record.extra['ip'] = client_ip + record.extra["ip"] = client_ip - custom_log = CustomLogger('awesome logger') - fmt = ('[{record.level_name}] {record.channel}: ' - '{record.message} [{record.extra[ip]}]') + custom_log = CustomLogger("awesome logger") + fmt = ( + "[{record.level_name}] {record.channel}: " + "{record.message} [{record.extra[ip]}]" + ) handler = logbook.TestHandler(format_string=fmt) assert handler.format_string == fmt with activation_strategy(handler): - custom_log.warn('Too many sounds') + custom_log.warn("Too many sounds") logger.warn('"Music" playing') assert handler.formatted_records == [ - '[WARNING] awesome logger: Too many sounds [127.0.0.1]', 
- '[WARNING] testlogger: "Music" playing []'] + "[WARNING] awesome logger: Too many sounds [127.0.0.1]", + '[WARNING] testlogger: "Music" playing []', + ] def test_custom_handling(activation_strategy, logger): class MyTestHandler(logbook.TestHandler): def handle(self, record): - if record.extra.get('flag') != 'testing': + if record.extra.get("flag") != "testing": return False return logbook.TestHandler.handle(self, record) @@ -39,99 +41,98 @@ def handle(self, record): class MyLogger(logbook.Logger): def process_record(self, record): logbook.Logger.process_record(self, record) - record.extra['flag'] = 'testing' + record.extra["flag"] = "testing" log = MyLogger() handler = MyTestHandler() with capturing_stderr_context() as captured: with activation_strategy(handler): - log.warn('From my logger') - logger.warn('From another logger') - assert handler.has_warning('From my logger') - assert 'From another logger' in captured.getvalue() + log.warn("From my logger") + logger.warn("From another logger") + assert handler.has_warning("From my logger") + assert "From another logger" in captured.getvalue() def test_nested_setups(activation_strategy): with capturing_stderr_context() as captured: - logger = logbook.Logger('App') - test_handler = logbook.TestHandler(level='WARNING') + logger = logbook.Logger("App") + test_handler = logbook.TestHandler(level="WARNING") mail_handler = make_fake_mail_handler(bubble=True) - handlers = logbook.NestedSetup([ - logbook.NullHandler(), - test_handler, - mail_handler - ]) + handlers = logbook.NestedSetup( + [logbook.NullHandler(), test_handler, mail_handler] + ) with activation_strategy(handlers): - logger.warn('This is a warning') - logger.error('This is also a mail') + logger.warn("This is a warning") + logger.error("This is also a mail") try: 1 / 0 except Exception: logger.exception() - logger.warn('And here we go straight back to stderr') + logger.warn("And here we go straight back to stderr") - assert test_handler.has_warning('This is a warning') - assert test_handler.has_error('This is also a mail') + assert test_handler.has_warning("This is a warning") + assert test_handler.has_error("This is also a mail") assert len(mail_handler.mails) == 2 - assert 'This is also a mail' in mail_handler.mails[0][2] - assert '1 / 0' in mail_handler.mails[1][2] - assert 'And here we go straight back to stderr' in captured.getvalue() + assert "This is also a mail" in mail_handler.mails[0][2] + assert "1 / 0" in mail_handler.mails[1][2] + assert "And here we go straight back to stderr" in captured.getvalue() with activation_strategy(handlers): - logger.warn('threadbound warning') + logger.warn("threadbound warning") handlers.push_application() try: - logger.warn('applicationbound warning') + logger.warn("applicationbound warning") finally: handlers.pop_application() def test_filtering(activation_strategy): - logger1 = logbook.Logger('Logger1') - logger2 = logbook.Logger('Logger2') + logger1 = logbook.Logger("Logger1") + logger2 = logbook.Logger("Logger2") handler = logbook.TestHandler() outer_handler = logbook.TestHandler() def only_1(record, handler): return record.dispatcher is logger1 + handler.filter = only_1 with activation_strategy(outer_handler): with activation_strategy(handler): - logger1.warn('foo') - logger2.warn('bar') + logger1.warn("foo") + logger2.warn("bar") - assert handler.has_warning('foo', channel='Logger1') - assert (not handler.has_warning('bar', channel='Logger2')) - assert (not outer_handler.has_warning('foo', channel='Logger1')) - assert 
outer_handler.has_warning('bar', channel='Logger2') + assert handler.has_warning("foo", channel="Logger1") + assert not handler.has_warning("bar", channel="Logger2") + assert not outer_handler.has_warning("foo", channel="Logger1") + assert outer_handler.has_warning("bar", channel="Logger2") def test_different_context_pushing(activation_strategy): h1 = logbook.TestHandler(level=logbook.DEBUG) h2 = logbook.TestHandler(level=logbook.INFO) h3 = logbook.TestHandler(level=logbook.WARNING) - logger = logbook.Logger('Testing') + logger = logbook.Logger("Testing") with activation_strategy(h1): with activation_strategy(h2): with activation_strategy(h3): - logger.warn('Wuuu') - logger.info('still awesome') - logger.debug('puzzled') + logger.warn("Wuuu") + logger.info("still awesome") + logger.debug("puzzled") - assert h1.has_debug('puzzled') - assert h2.has_info('still awesome') - assert h3.has_warning('Wuuu') + assert h1.has_debug("puzzled") + assert h2.has_info("still awesome") + assert h3.has_warning("Wuuu") for handler in h1, h2, h3: assert len(handler.records) == 1 def test_default_handlers(logger): with capturing_stderr_context() as stream: - logger.warn('Aha!') + logger.warn("Aha!") captured = stream.getvalue() - assert 'WARNING: testlogger: Aha!' in captured + assert "WARNING: testlogger: Aha!" in captured diff --git a/tests/test_helpers.py b/tests/test_helpers.py index ec2fd674..d26e7bb3 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -1,6 +1,3 @@ -# -*- coding: utf-8 -*- - -from logbook.helpers import u from datetime import datetime import pytest @@ -9,36 +6,45 @@ def test_jsonhelper(): from logbook.helpers import to_safe_json - class Bogus(object): + class Bogus: def __str__(self): - return 'bogus' - - rv = to_safe_json([ + return "bogus" + + rv = to_safe_json( + [ + None, + "foo", + "jäger", + 1, + datetime(2000, 1, 1), + {"jäger1": 1, "jäger2": 2, Bogus(): 3, "invalid": object()}, + object(), # invalid + ] + ) + + assert rv == [ None, - 'foo', - u('jäger'), + "foo", + "jäger", 1, - datetime(2000, 1, 1), - {'jäger1': 1, u('jäger2'): 2, Bogus(): 3, 'invalid': object()}, - object() # invalid - ]) - - assert rv == [None, u('foo'), u('jäger'), 1, '2000-01-01T00:00:00Z', - {u('jäger1'): 1, u('jäger2'): 2, u('bogus'): 3, - u('invalid'): None}, None] + "2000-01-01T00:00:00Z", + {"jäger1": 1, "jäger2": 2, "bogus": 3, "invalid": None}, + None, + ] def test_datehelpers(): from logbook.helpers import format_iso8601, parse_iso8601 + now = datetime.now() rv = format_iso8601() assert rv[:4] == str(now.year) with pytest.raises(ValueError): - parse_iso8601('foo') - v = parse_iso8601('2000-01-01T00:00:00.12Z') + parse_iso8601("foo") + v = parse_iso8601("2000-01-01T00:00:00.12Z") assert v.microsecond == 120000 - v = parse_iso8601('2000-01-01T12:00:00+01:00') + v = parse_iso8601("2000-01-01T12:00:00+01:00") assert v.hour == 11 - v = parse_iso8601('2000-01-01T12:00:00-01:00') + v = parse_iso8601("2000-01-01T12:00:00-01:00") assert v.hour == 13 diff --git a/tests/test_log_record.py b/tests/test_log_record.py index 1c52fe61..de2c273d 100644 --- a/tests/test_log_record.py +++ b/tests/test_log_record.py @@ -8,38 +8,51 @@ def test_exc_info_when_no_exceptions_exist(logger): with capturing_stderr_context() as captured: with logbook.StreamHandler(sys.stderr): - logger.debug('message', exc_info=True) - assert 'Traceback' not in captured.getvalue() + logger.debug("message", exc_info=True) + assert "Traceback" not in captured.getvalue() def test_exc_info_false(): with logbook.handlers.TestHandler() as 
handler: - logbook.debug('message here', exc_info=False) + logbook.debug("message here", exc_info=False) [record] = handler.records assert not record.formatted_exception +def test_exc_info_exception_instance(logger): + with logbook.handlers.TestHandler() as handler: + try: + raise ValueError("error here") + except Exception as e: + error = e + logger.exception(exc_info=error) + [record] = handler.records + assert isinstance(record.exc_info, tuple) + assert len(record.exc_info) == 3 + assert "Traceback" in record.formatted_exception + + def test_extradict(active_handler, logger): - logger.warn('Test warning') + logger.warn("Test warning") record = active_handler.records[0] - record.extra['existing'] = 'foo' - assert record.extra['nonexisting'] == '' - assert record.extra['existing'] == 'foo' + record.extra["existing"] = "foo" + assert record.extra["nonexisting"] == "" + assert record.extra["existing"] == "foo" def test_calling_frame(active_handler, logger): - logger.warn('test') + logger.warn("test") assert active_handler.records[0].calling_frame == sys._getframe() def test_frame_correction(active_handler, logger): def inner(): - logger.warn('test', frame_correction=+1) + logger.warn("test", frame_correction=+1) inner() assert active_handler.records[0].calling_frame == sys._getframe() def test_dispatcher(active_handler, logger): - logger.warn('Logbook is too awesome for stdlib') + logger.warn("Logbook is too awesome for stdlib") assert active_handler.records[0].dispatcher == logger diff --git a/tests/test_logbook.py b/tests/test_logbook.py index 04083154..b5301cba 100644 --- a/tests/test_logbook.py +++ b/tests/test_logbook.py @@ -1,28 +1,28 @@ -import logbook - import pytest +import logbook + def test_global_functions(activation_strategy): with activation_strategy(logbook.TestHandler()) as handler: - logbook.debug('a debug message') - logbook.info('an info message') - logbook.warn('warning part 1') - logbook.warning('warning part 2') - logbook.notice('notice') - logbook.error('an error') - logbook.critical('pretty critical') - logbook.log(logbook.CRITICAL, 'critical too') + logbook.debug("a debug message") + logbook.info("an info message") + logbook.warn("warning part 1") + logbook.warning("warning part 2") + logbook.notice("notice") + logbook.error("an error") + logbook.critical("pretty critical") + logbook.log(logbook.CRITICAL, "critical too") - assert handler.has_debug('a debug message') - assert handler.has_info('an info message') - assert handler.has_warning('warning part 1') - assert handler.has_warning('warning part 2') - assert handler.has_notice('notice') - assert handler.has_error('an error') - assert handler.has_critical('pretty critical') - assert handler.has_critical('critical too') - assert handler.records[0].channel == 'Generic' + assert handler.has_debug("a debug message") + assert handler.has_info("an info message") + assert handler.has_warning("warning part 1") + assert handler.has_warning("warning part 2") + assert handler.has_notice("notice") + assert handler.has_error("an error") + assert handler.has_critical("pretty critical") + assert handler.has_critical("critical too") + assert handler.records[0].channel == "Generic" assert handler.records[0].dispatcher is None @@ -30,4 +30,4 @@ def test_level_lookup_failures(): with pytest.raises(LookupError): logbook.get_level_name(37) with pytest.raises(LookupError): - logbook.lookup_level('FOO') + logbook.lookup_level("FOO") diff --git a/tests/test_logger.py b/tests/test_logger.py index 9ac5ab82..536f8087 100644 --- 
a/tests/test_logger.py +++ b/tests/test_logger.py @@ -1,14 +1,15 @@ -import logbook import pytest +import logbook + def test_level_properties(logger): assert logger.level == logbook.NOTSET - assert logger.level_name == 'NOTSET' - logger.level_name = 'WARNING' + assert logger.level_name == "NOTSET" + logger.level_name = "WARNING" assert logger.level == logbook.WARNING logger.level = logbook.ERROR - assert logger.level_name == 'ERROR' + assert logger.level_name == "ERROR" def test_reflected_properties(logger): @@ -17,14 +18,14 @@ def test_reflected_properties(logger): assert logger.group == group group.level = logbook.ERROR assert logger.level == logbook.ERROR - assert logger.level_name == 'ERROR' + assert logger.level_name == "ERROR" group.level = logbook.WARNING assert logger.level == logbook.WARNING - assert logger.level_name == 'WARNING' + assert logger.level_name == "WARNING" logger.level = logbook.CRITICAL group.level = logbook.DEBUG assert logger.level == logbook.CRITICAL - assert logger.level_name == 'CRITICAL' + assert logger.level_name == "CRITICAL" group.remove_logger(logger) assert logger.group is None diff --git a/tests/test_logging_api.py b/tests/test_logging_api.py index a9b2c64c..cbe70cb9 100644 --- a/tests/test_logging_api.py +++ b/tests/test_logging_api.py @@ -1,18 +1,18 @@ import pickle import sys -import logbook -from logbook.helpers import iteritems, xrange, u - import pytest +import logbook + def test_basic_logging(active_handler, logger): - logger.warn('This is a warning. Nice hah?') + logger.warn("This is a warning. Nice hah?") - assert active_handler.has_warning('This is a warning. Nice hah?') + assert active_handler.has_warning("This is a warning. Nice hah?") assert active_handler.formatted_records == [ - '[WARNING] testlogger: This is a warning. Nice hah?'] + "[WARNING] testlogger: This is a warning. Nice hah?" 
+ ] def test_exception_catching(active_handler, logger): @@ -24,32 +24,29 @@ def test_exception_catching(active_handler, logger): try: 1 / 0 except Exception: - logger.exception('Awesome') - assert active_handler.has_error('Uncaught exception occurred') - assert active_handler.has_error('Awesome') + logger.exception("Awesome") + assert active_handler.has_error("Uncaught exception occurred") + assert active_handler.has_error("Awesome") assert active_handler.records[0].exc_info is not None - assert '1 / 0' in active_handler.records[0].formatted_exception + assert "1 / 0" in active_handler.records[0].formatted_exception def test_exception_catching_with_unicode(): - """ See https://github.com/getlogbook/logbook/issues/104 - """ + """See https://github.com/getlogbook/logbook/issues/104""" try: - raise Exception(u('\u202a test \u202c')) + raise Exception("\u202a test \u202c") except: - r = logbook.LogRecord('channel', 'DEBUG', 'test', - exc_info=sys.exc_info()) + r = logbook.LogRecord("channel", "DEBUG", "test", exc_info=sys.exc_info()) r.exception_message -@pytest.mark.parametrize('as_tuple', [True, False]) +@pytest.mark.parametrize("as_tuple", [True, False]) def test_exc_info(as_tuple, logger, active_handler): try: 1 / 0 except Exception: exc_info = sys.exc_info() - logger.info("Exception caught", - exc_info=exc_info if as_tuple else True) + logger.info("Exception caught", exc_info=exc_info if as_tuple else True) assert active_handler.records[0].exc_info is not None assert active_handler.records[0].exc_info == exc_info @@ -64,8 +61,8 @@ def test_to_dict(logger, active_handler): exported = record.to_dict() record.close() imported = logbook.LogRecord.from_dict(exported) - for key, value in iteritems(record.__dict__): - if key[0] == '_': + for key, value in record.__dict__.items(): + if key[0] == "_": continue assert value == getattr(imported, key) @@ -79,11 +76,11 @@ def test_pickle(active_handler, logger): record.pull_information() record.close() - for p in xrange(pickle.HIGHEST_PROTOCOL): + for p in range(pickle.HIGHEST_PROTOCOL): exported = pickle.dumps(record, p) imported = pickle.loads(exported) - for key, value in iteritems(record.__dict__): - if key[0] == '_': + for key, value in record.__dict__.items(): + if key[0] == "_": continue imported_value = getattr(imported, key) if isinstance(value, ZeroDivisionError): diff --git a/tests/test_logging_compat.py b/tests/test_logging_compat.py index 7964993c..74f08fae 100644 --- a/tests/test_logging_compat.py +++ b/tests/test_logging_compat.py @@ -1,89 +1,92 @@ import functools +from io import StringIO from random import randrange +import pytest + import logbook import logbook.compat -from logbook.helpers import StringIO - -import pytest from .utils import capturing_stderr_context - __file_without_pyc__ = __file__ if __file_without_pyc__.endswith(".pyc"): __file_without_pyc__ = __file_without_pyc__[:-1] -@pytest.mark.parametrize('set_root_logger_level', [True, False]) +@pytest.mark.parametrize("set_root_logger_level", [True, False]) def test_basic_compat(request, set_root_logger_level): import logging + from logbook.compat import redirected_logging # mimic the default logging setting - request.addfinalizer(functools.partial( - logging.root.setLevel, logging.root.level)) + request.addfinalizer(functools.partial(logging.root.setLevel, logging.root.level)) logging.root.setLevel(logging.WARNING) - name = 'test_logbook-%d' % randrange(1 << 32) + name = "test_logbook-%d" % randrange(1 << 32) logger = logging.getLogger(name) with 
logbook.TestHandler(bubble=True) as handler: with capturing_stderr_context() as captured: with redirected_logging(set_root_logger_level): - logger.debug('This is from the old system') - logger.info('This is from the old system') - logger.warning('This is from the old %s', 'system') - logger.error('This is from the old system') - logger.critical('This is from the old system') - logger.error('This is a %(what)s %(where)s', {'what': 'mapping', 'where': 'test'}) - assert ('WARNING: %s: This is from the old system' % - name) in captured.getvalue() - assert ('ERROR: %s: This is a mapping test' % - name) in captured.getvalue() + logger.debug("This is from the old system") + logger.info("This is from the old system") + logger.warning("This is from the old %s", "system") + logger.error("This is from the old system") + logger.critical("This is from the old system") + logger.error( + "This is a %(what)s %(where)s", {"what": "mapping", "where": "test"} + ) + assert ( + "WARNING: %s: This is from the old system" % name + ) in captured.getvalue() + assert ("ERROR: %s: This is a mapping test" % name) in captured.getvalue() if set_root_logger_level: assert handler.records[0].level == logbook.DEBUG else: assert handler.records[0].level == logbook.WARNING - assert handler.records[0].msg == 'This is from the old %s' + assert handler.records[0].msg == "This is from the old %s" def test_redirect_logbook(): import logging + out = StringIO() logger = logging.getLogger() - logbook_logger = logbook.Logger('testlogger') + logbook_logger = logbook.Logger("testlogger") old_handlers = logger.handlers[:] handler = logging.StreamHandler(out) - handler.setFormatter(logging.Formatter( - '%(name)s:%(levelname)s:%(message)s')) + handler.setFormatter(logging.Formatter("%(name)s:%(levelname)s:%(message)s")) logger.handlers[:] = [handler] try: with logbook.compat.LoggingHandler(): logbook_logger.warn("This goes to logging") - pieces = out.getvalue().strip().split(':') - assert pieces == ['testlogger', 'WARNING', 'This goes to logging'] + pieces = out.getvalue().strip().split(":") + assert pieces == ["testlogger", "WARNING", "This goes to logging"] finally: logger.handlers[:] = old_handlers from itertools import count + test_warning_redirections_i = count() def test_warning_redirections(): from logbook.compat import redirected_warnings + with logbook.TestHandler() as handler: redirector = redirected_warnings() redirector.start() try: - from warnings import warn, resetwarnings + from warnings import resetwarnings, warn + resetwarnings() - warn(RuntimeWarning('Testing' + str(next(test_warning_redirections_i)))) + warn(RuntimeWarning("Testing" + str(next(test_warning_redirections_i)))) finally: redirector.end() assert len(handler.records) == 1 - assert handler.formatted_records[0].startswith( - '[WARNING] RuntimeWarning: Testing') + assert handler.formatted_records[0].startswith("[WARNING] RuntimeWarning: Testing") assert __file_without_pyc__ in handler.records[0].filename diff --git a/tests/test_logging_times.py b/tests/test_logging_times.py index c87c4e84..a27bf06c 100644 --- a/tests/test_logging_times.py +++ b/tests/test_logging_times.py @@ -1,45 +1,46 @@ from datetime import datetime, timedelta, tzinfo -import logbook - import pytest -from .utils import get_total_delta_seconds +import logbook def test_timedate_format(activation_strategy, logger): """ tests the logbook.set_datetime_format() function """ - FORMAT_STRING = '{record.time:%H:%M:%S.%f} {record.message}' + FORMAT_STRING = "{record.time:%H:%M:%S.%f} {record.message}" 
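
# Aside: a compact sketch of logbook.set_datetime_format(), the API this test
# drives. It accepts "utc" (the default), "local", or a callable returning a
# datetime; record.time is produced by whichever factory is active. The
# try/finally restore mirrors the pattern used in the test below.
import logbook

logbook.set_datetime_format("local")
try:
    fmt = "{record.time:%H:%M:%S} {record.message}"
    with logbook.TestHandler(format_string=fmt) as h:
        logbook.Logger("sketch").warn("local timestamps")
        assert h.records[0].time is not None
finally:
    logbook.set_datetime_format("utc")  # put back the default time factory
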
handler = logbook.TestHandler(format_string=FORMAT_STRING) with activation_strategy(handler): - logbook.set_datetime_format('utc') + logbook.set_datetime_format("utc") try: - logger.warn('This is a warning.') + logger.warn("This is a warning.") time_utc = handler.records[0].time - logbook.set_datetime_format('local') - logger.warn('This is a warning.') + logbook.set_datetime_format("local") + logger.warn("This is a warning.") time_local = handler.records[1].time finally: # put back the default time factory - logbook.set_datetime_format('utc') + logbook.set_datetime_format("utc") # get the expected difference between local and utc time t1 = datetime.now() t2 = datetime.utcnow() - tz_minutes_diff = get_total_delta_seconds(t1 - t2)/60.0 + tz_minutes_diff = (t1 - t2).total_seconds() / 60.0 if abs(tz_minutes_diff) < 1: - pytest.skip('Cannot test utc/localtime differences ' - 'if they vary by less than one minute...') + pytest.skip( + "Cannot test utc/localtime differences " + "if they vary by less than one minute..." + ) # get the difference between LogRecord local and utc times - logbook_minutes_diff = get_total_delta_seconds(time_local - time_utc)/60.0 + logbook_minutes_diff = (time_local - time_utc).total_seconds() / 60.0 assert abs(logbook_minutes_diff) > 1, ( - 'Localtime does not differ from UTC by more than 1 ' - 'minute (Local: %s, UTC: %s)' % (time_local, time_utc)) + "Localtime does not differ from UTC by more than 1 " + "minute (Local: %s, UTC: %s)" % (time_local, time_utc) + ) ratio = logbook_minutes_diff / tz_minutes_diff @@ -51,11 +52,14 @@ def test_tz_aware(activation_strategy, logger): """ tests logbook.set_datetime_format() with a time zone aware time factory """ + class utc(tzinfo): def tzname(self, dt): - return 'UTC' + return "UTC" + def utcoffset(self, dt): return timedelta(seconds=0) + def dst(self, dt): return timedelta(seconds=0) @@ -64,16 +68,16 @@ def dst(self, dt): def utc_tz(): return datetime.now(tz=utc) - FORMAT_STRING = '{record.time:%H:%M:%S.%f%z} {record.message}' + FORMAT_STRING = "{record.time:%H:%M:%S.%f%z} {record.message}" handler = logbook.TestHandler(format_string=FORMAT_STRING) with activation_strategy(handler): logbook.set_datetime_format(utc_tz) try: - logger.warn('this is a warning.') + logger.warn("this is a warning.") record = handler.records[0] finally: # put back the default time factory - logbook.set_datetime_format('utc') + logbook.set_datetime_format("utc") assert record.time.tzinfo is not None @@ -82,6 +86,7 @@ def test_invalid_time_factory(): """ tests logbook.set_datetime_format() with an invalid time factory callable """ + def invalid_factory(): return False @@ -90,6 +95,6 @@ def invalid_factory(): logbook.set_datetime_format(invalid_factory) finally: # put back the default time factory - logbook.set_datetime_format('utc') + logbook.set_datetime_format("utc") - assert 'Invalid callable value' in str(e.value) + assert "Invalid callable value" in str(e.value) diff --git a/tests/test_mail_handler.py b/tests/test_mail_handler.py index 718d936f..108ef00b 100644 --- a/tests/test_mail_handler.py +++ b/tests/test_mail_handler.py @@ -1,32 +1,26 @@ import base64 import re -import sys +from unittest.mock import call, patch import logbook -from logbook.helpers import u from .utils import capturing_stderr_context, make_fake_mail_handler -try: - from unittest.mock import Mock, call, patch -except ImportError: - from mock import Mock, call, patch - __file_without_pyc__ = __file__ -if __file_without_pyc__.endswith('.pyc'): +if 
__file_without_pyc__.endswith(".pyc"): __file_without_pyc__ = __file_without_pyc__[:-1] def test_mail_handler(activation_strategy, logger): - subject = u('\xf8nicode') + subject = "\xf8nicode" handler = make_fake_mail_handler(subject=subject) with capturing_stderr_context() as fallback: with activation_strategy(handler): - logger.warn('This is not mailed') + logger.warn("This is not mailed") try: 1 / 0 except Exception: - logger.exception(u('Viva la Espa\xf1a')) + logger.exception("Viva la Espa\xf1a") if not handler.mails: # if sending the mail failed, the reason should be on stderr @@ -34,201 +28,205 @@ def test_mail_handler(activation_strategy, logger): assert len(handler.mails) == 1 sender, receivers, mail = handler.mails[0] - mail = mail.replace('\r', '') + mail = mail.replace("\r", "") assert sender == handler.from_addr - assert '=?utf-8?q?=C3=B8nicode?=' in mail - header, data = mail.split('\n\n', 1) - if 'Content-Transfer-Encoding: base64' in header: - data = base64.b64decode(data).decode('utf-8') - assert re.search(r'Message type:\s+ERROR', data) - assert re.search(r'Location:.*%s' % - re.escape(__file_without_pyc__), data) - assert re.search(r'Module:\s+%s' % __name__, data) - assert re.search(r'Function:\s+test_mail_handler', data) - body = u('Viva la Espa\xf1a') - if sys.version_info < (3, 0): - body = body.encode('utf-8') + assert "=?utf-8?q?=C3=B8nicode?=" in mail + header, data = mail.split("\n\n", 1) + if "Content-Transfer-Encoding: base64" in header: + data = base64.b64decode(data).decode("utf-8") + assert re.search(r"Message type:\s+ERROR", data) + assert re.search(r"Location:.*%s" % re.escape(__file_without_pyc__), data) + assert re.search(r"Module:\s+%s" % __name__, data) + assert re.search(r"Function:\s+test_mail_handler", data) + body = "Viva la Espa\xf1a" assert body in data - assert '\nTraceback (most' in data - assert '1 / 0' in data - assert 'This is not mailed' in fallback.getvalue() + assert "\nTraceback (most" in data + assert "1 / 0" in data + assert "This is not mailed" in fallback.getvalue() def test_mail_handler_batching(activation_strategy, logger): mail_handler = make_fake_mail_handler() handler = logbook.FingersCrossedHandler(mail_handler, reset=True) with activation_strategy(handler): - logger.warn('Testing') - logger.debug('Even more') - logger.error('And this triggers it') - logger.info('Aha') - logger.error('And this triggers it again!') + logger.warn("Testing") + logger.debug("Even more") + logger.error("And this triggers it") + logger.info("Aha") + logger.error("And this triggers it again!") assert len(mail_handler.mails) == 2 mail = mail_handler.mails[0][2] - pieces = mail.split('Log records that led up to this one:') + pieces = mail.split("Log records that led up to this one:") assert len(pieces) == 2 body, rest = pieces - rest = rest.replace('\r', '') + rest = rest.replace("\r", "") - assert re.search(r'Message type:\s+ERROR', body) - assert re.search(r'Module:\s+%s' % __name__, body) - assert re.search(r'Function:\s+test_mail_handler_batching', body) + assert re.search(r"Message type:\s+ERROR", body) + assert re.search(r"Module:\s+%s" % __name__, body) + assert re.search(r"Function:\s+test_mail_handler_batching", body) - related = rest.strip().split('\n\n') + related = rest.strip().split("\n\n") assert len(related) == 2 - assert re.search(r'Message type:\s+WARNING', related[0]) - assert re.search(r'Message type:\s+DEBUG', related[1]) + assert re.search(r"Message type:\s+WARNING", related[0]) + assert re.search(r"Message type:\s+DEBUG", 
related[1]) - assert 'And this triggers it again' in mail_handler.mails[1][2] + assert "And this triggers it again" in mail_handler.mails[1][2] def test_group_handler_mail_combo(activation_strategy, logger): mail_handler = make_fake_mail_handler(level=logbook.DEBUG) handler = logbook.GroupHandler(mail_handler) with activation_strategy(handler): - logger.error('The other way round') - logger.warn('Testing') - logger.debug('Even more') + logger.error("The other way round") + logger.warn("Testing") + logger.debug("Even more") assert mail_handler.mails == [] assert len(mail_handler.mails) == 1 mail = mail_handler.mails[0][2] - pieces = mail.split('Other log records in the same group:') + pieces = mail.split("Other log records in the same group:") assert len(pieces) == 2 body, rest = pieces - rest = rest.replace('\r', '') + rest = rest.replace("\r", "") - assert re.search(r'Message type:\s+ERROR', body) - assert re.search(r'Module:\s+' + __name__, body) - assert re.search(r'Function:\s+test_group_handler_mail_combo', body) + assert re.search(r"Message type:\s+ERROR", body) + assert re.search(r"Module:\s+" + __name__, body) + assert re.search(r"Function:\s+test_group_handler_mail_combo", body) - related = rest.strip().split('\n\n') + related = rest.strip().split("\n\n") assert len(related) == 2 - assert re.search(r'Message type:\s+WARNING', related[0]) - assert re.search(r'Message type:\s+DEBUG', related[1]) + assert re.search(r"Message type:\s+WARNING", related[0]) + assert re.search(r"Message type:\s+DEBUG", related[1]) def test_mail_handler_arguments(): - with patch('smtplib.SMTP', autospec=True) as mock_smtp: - + with patch("smtplib.SMTP", autospec=True) as mock_smtp: # Test the mail handler with supported arguments before changes to # secure, credentials, and starttls mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr=('server.example.com', 465), - credentials=('username', 'password'), - secure=('keyfile', 'certfile')) + from_addr="from@example.com", + recipients="to@example.com", + server_addr=("server.example.com", 465), + credentials=("username", "password"), + secure=("keyfile", "certfile"), + ) mail_handler.get_connection() - assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.call_args == call("server.example.com", 465) assert mock_smtp.method_calls[1] == call().starttls( - keyfile='keyfile', certfile='certfile') - assert mock_smtp.method_calls[3] == call().login('username', 'password') + keyfile="keyfile", certfile="certfile" + ) + assert mock_smtp.method_calls[3] == call().login("username", "password") # Test secure=() mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr=('server.example.com', 465), - credentials=('username', 'password'), - secure=()) + from_addr="from@example.com", + recipients="to@example.com", + server_addr=("server.example.com", 465), + credentials=("username", "password"), + secure=(), + ) mail_handler.get_connection() - assert mock_smtp.call_args == call('server.example.com', 465) - assert mock_smtp.method_calls[5] == call().starttls( - certfile=None, keyfile=None) - assert mock_smtp.method_calls[7] == call().login('username', 'password') + assert mock_smtp.call_args == call("server.example.com", 465) + assert mock_smtp.method_calls[5] == call().starttls(certfile=None, keyfile=None) + assert mock_smtp.method_calls[7] == call().login("username", "password") # Test implicit port with string server_addr, dictionary 
credentials, # dictionary secure. mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr='server.example.com', - credentials={'user': 'username', 'password': 'password'}, - secure={'certfile': 'certfile2', 'keyfile': 'keyfile2'}) + from_addr="from@example.com", + recipients="to@example.com", + server_addr="server.example.com", + credentials={"user": "username", "password": "password"}, + secure={"certfile": "certfile2", "keyfile": "keyfile2"}, + ) mail_handler.get_connection() - assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.call_args == call("server.example.com", 465) assert mock_smtp.method_calls[9] == call().starttls( - certfile='certfile2', keyfile='keyfile2') + certfile="certfile2", keyfile="keyfile2" + ) assert mock_smtp.method_calls[11] == call().login( - user='username', password='password') + user="username", password="password" + ) # Test secure=True mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr=('server.example.com', 465), - credentials=('username', 'password'), - secure=True) + from_addr="from@example.com", + recipients="to@example.com", + server_addr=("server.example.com", 465), + credentials=("username", "password"), + secure=True, + ) mail_handler.get_connection() - assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.call_args == call("server.example.com", 465) assert mock_smtp.method_calls[13] == call().starttls( - certfile=None, keyfile=None) - assert mock_smtp.method_calls[15] == call().login('username', 'password') + certfile=None, keyfile=None + ) + assert mock_smtp.method_calls[15] == call().login("username", "password") assert len(mock_smtp.method_calls) == 16 # Test secure=False mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr=('server.example.com', 465), - credentials=('username', 'password'), - secure=False) + from_addr="from@example.com", + recipients="to@example.com", + server_addr=("server.example.com", 465), + credentials=("username", "password"), + secure=False, + ) mail_handler.get_connection() # starttls not called because we check len of method_calls before and # after this test. 
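
# Aside: a hedged summary of the connection matrix these mocked assertions
# encode. As far as the expectations above show: with starttls=True (the
# default) MailHandler uses smtplib.SMTP and calls starttls() whenever
# secure is truthy (a tuple, dict, or True supplying keyfile/certfile),
# while starttls=False with secure switches to smtplib.SMTP_SSL. This is
# illustrative only and mirrors the mocks, not a live SMTP exchange.
import logbook

handler = logbook.MailHandler(
    from_addr="from@example.com",
    recipients="to@example.com",
    server_addr=("server.example.com", 465),
    credentials=("username", "password"),
    secure=True,  # plain session first, then upgrade via STARTTLS
)
# handler.get_connection() would perform SMTP(...), starttls(), login() here;
# left uncalled so the sketch runs without a reachable mail server.
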
- assert mock_smtp.call_args == call('server.example.com', 465) - assert mock_smtp.method_calls[16] == call().login('username', 'password') + assert mock_smtp.call_args == call("server.example.com", 465) + assert mock_smtp.method_calls[16] == call().login("username", "password") assert len(mock_smtp.method_calls) == 17 - with patch('smtplib.SMTP_SSL', autospec=True) as mock_smtp_ssl: + with patch("smtplib.SMTP_SSL", autospec=True) as mock_smtp_ssl: # Test starttls=False mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr='server.example.com', - credentials={'user': 'username', 'password': 'password'}, - secure={'certfile': 'certfile', 'keyfile': 'keyfile'}, - starttls=False) + from_addr="from@example.com", + recipients="to@example.com", + server_addr="server.example.com", + credentials={"user": "username", "password": "password"}, + secure={"certfile": "certfile", "keyfile": "keyfile"}, + starttls=False, + ) mail_handler.get_connection() assert mock_smtp_ssl.call_args == call( - 'server.example.com', 465, keyfile='keyfile', certfile='certfile') + "server.example.com", 465, keyfile="keyfile", certfile="certfile" + ) assert mock_smtp_ssl.method_calls[0] == call().login( - user='username', password='password') + user="username", password="password" + ) # Test starttls=False with secure=True mail_handler = logbook.MailHandler( - from_addr='from@example.com', - recipients='to@example.com', - server_addr='server.example.com', - credentials={'user': 'username', 'password': 'password'}, + from_addr="from@example.com", + recipients="to@example.com", + server_addr="server.example.com", + credentials={"user": "username", "password": "password"}, secure=True, - starttls=False) + starttls=False, + ) mail_handler.get_connection() assert mock_smtp_ssl.call_args == call( - 'server.example.com', 465, keyfile=None, certfile=None) + "server.example.com", 465, keyfile=None, certfile=None + ) assert mock_smtp_ssl.method_calls[1] == call().login( - user='username', password='password') - - - - - - + user="username", password="password" + ) diff --git a/tests/test_more.py b/tests/test_more.py index 0d871d5e..606964d6 100644 --- a/tests/test_more.py +++ b/tests/test_more.py @@ -1,30 +1,32 @@ import sys - -import logbook -from logbook.helpers import StringIO +from io import StringIO import pytest +import logbook + from .utils import capturing_stderr_context, missing, require_module -@require_module('jinja2') +@require_module("jinja2") def test_jinja_formatter(logger): from logbook.more import JinjaFormatter - fmter = JinjaFormatter('{{ record.channel }}/{{ record.level_name }}') + + fmter = JinjaFormatter("{{ record.channel }}/{{ record.level_name }}") handler = logbook.TestHandler() handler.formatter = fmter with handler: - logger.info('info') - assert 'testlogger/INFO' in handler.formatted_records + logger.info("info") + assert "testlogger/INFO" in handler.formatted_records -@missing('jinja2') +@missing("jinja2") def test_missing_jinja2(): from logbook.more import JinjaFormatter + # check the RuntimeError is raised with pytest.raises(RuntimeError): - JinjaFormatter('dummy') + JinjaFormatter("dummy") def test_colorizing_support(logger): @@ -32,71 +34,74 @@ def test_colorizing_support(logger): class TestColorizingHandler(ColorizedStderrHandler): def __init__(self, *args, **kwargs): - super(TestColorizingHandler, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._obj_stream = StringIO() @property def stream(self): return 
self._obj_stream - with TestColorizingHandler(format_string='{record.message}') as handler: + with TestColorizingHandler(format_string="{record.message}") as handler: handler.force_color() - logger.error('An error') - logger.warn('A warning') - logger.debug('A debug message') - lines = handler.stream.getvalue().rstrip('\n').splitlines() + logger.error("An error") + logger.warn("A warning") + logger.debug("A debug message") + lines = handler.stream.getvalue().rstrip("\n").splitlines() assert lines == [ - '\x1b[31;01mAn error\x1b[39;49;00m', - '\x1b[33;01mA warning\x1b[39;49;00m', - '\x1b[37mA debug message\x1b[39;49;00m'] + "\x1b[31;01mAn error\x1b[39;49;00m", + "\x1b[33;01mA warning\x1b[39;49;00m", + "\x1b[37mA debug message\x1b[39;49;00m", + ] - with TestColorizingHandler(format_string='{record.message}') as handler: + with TestColorizingHandler(format_string="{record.message}") as handler: handler.forbid_color() - logger.error('An error') - logger.warn('A warning') - logger.debug('A debug message') - lines = handler.stream.getvalue().rstrip('\n').splitlines() - assert lines == ['An error', 'A warning', 'A debug message'] - + logger.error("An error") + logger.warn("A warning") + logger.debug("A debug message") + lines = handler.stream.getvalue().rstrip("\n").splitlines() + assert lines == ["An error", "A warning", "A debug message"] def test_tagged(default_handler): - from logbook.more import TaggingLogger, TaggingHandler + from logbook.more import TaggingHandler, TaggingLogger + stream = StringIO() second_handler = logbook.StreamHandler(stream) - logger = TaggingLogger('name', ['cmd']) - handler = TaggingHandler(dict( - info=default_handler, - cmd=second_handler, - both=[default_handler, second_handler], - )) + logger = TaggingLogger("name", ["cmd"]) + handler = TaggingHandler( + dict( + info=default_handler, + cmd=second_handler, + both=[default_handler, second_handler], + ) + ) handler.bubble = False with handler: with capturing_stderr_context() as captured: - logger.log('info', 'info message') - logger.log('both', 'all message') - logger.cmd('cmd message') + logger.log("info", "info message") + logger.log("both", "all message") + logger.cmd("cmd message") stderr = captured.getvalue() - assert 'info message' in stderr - assert 'all message' in stderr - assert 'cmd message' not in stderr + assert "info message" in stderr + assert "all message" in stderr + assert "cmd message" not in stderr stringio = stream.getvalue() - assert 'info message' not in stringio - assert 'all message' in stringio - assert 'cmd message' in stringio + assert "info message" not in stringio + assert "all message" in stringio + assert "cmd message" in stringio def test_tagging_logger(default_handler): from logbook import StderrHandler from logbook.more import TaggingLogger - logger = TaggingLogger('tagged', ['a', 'b']) + logger = TaggingLogger("tagged", ["a", "b"]) handler = StderrHandler(format_string="{record.msg}|{record.extra[tags]}") with handler: @@ -114,19 +119,28 @@ def test_tagging_logger(default_handler): def test_external_application_handler(tmpdir, logger): from logbook.more import ExternalApplicationHandler as Handler - fn = tmpdir.join('tempfile') - handler = Handler([sys.executable, '-c', r'''if 1: - f = open(%(tempfile)s, 'w') + + fn = tmpdir.join("tempfile") + handler = Handler( + [ + sys.executable, + "-c", + r"""if 1: + f = open({tempfile}, 'w') try: - f.write('{record.message}\n') + f.write('{{record.message}}\n') finally: f.close() - ''' % {'tempfile': repr(str(fn))}]) + """.format( + 
+                tempfile=repr(str(fn))
+            ),
+        ]
+    )
     with handler:
-        logger.error('this is a really bad idea')
+        logger.error("this is a really bad idea")

     with fn.open() as rf:
         contents = rf.read().strip()
-    assert contents == 'this is a really bad idea'
+    assert contents == "this is a really bad idea"


 def test_exception_handler(logger):
@@ -134,39 +148,43 @@
     with ExceptionHandler(ValueError):
         with pytest.raises(ValueError) as caught:
-            logger.info('here i am')
-    assert 'INFO: testlogger: here i am' in caught.value.args[0]
+            logger.info("here i am")
+    assert "INFO: testlogger: here i am" in caught.value.args[0]


 def test_exception_handler_specific_level(logger):
     from logbook.more import ExceptionHandler
+
     with logbook.TestHandler() as test_handler:
         with pytest.raises(ValueError) as caught:
-            with ExceptionHandler(ValueError, level='WARNING'):
-                logger.info('this is irrelevant')
-                logger.warn('here i am')
-        assert 'WARNING: testlogger: here i am' in caught.value.args[0]
-    assert 'this is irrelevant' in test_handler.records[0].message
+            with ExceptionHandler(ValueError, level="WARNING"):
+                logger.info("this is irrelevant")
+                logger.warn("here i am")
+        assert "WARNING: testlogger: here i am" in caught.value.args[0]
+    assert "this is irrelevant" in test_handler.records[0].message


 def test_dedup_handler(logger):
     from logbook.more import DedupHandler
+
     with logbook.TestHandler() as test_handler:
         with DedupHandler():
-            logger.info('foo')
-            logger.info('bar')
-            logger.info('foo')
+            logger.info("foo")
+            logger.info("bar")
+            logger.info("foo")

         assert 2 == len(test_handler.records)
-        assert 'message repeated 2 times: foo' in test_handler.records[0].message
-        assert 'message repeated 1 times: bar' in test_handler.records[1].message
-
+        assert "message repeated 2 times: foo" in test_handler.records[0].message
+        assert "message repeated 1 times: bar" in test_handler.records[1].message


-class TestRiemannHandler(object):
+class TestRiemannHandler:
     @require_module("riemann_client")
     def test_happy_path(self, logger):
         from logbook.more import RiemannHandler
-        riemann_handler = RiemannHandler("127.0.0.1", 5555, message_type="test", level=logbook.INFO)
+
+        riemann_handler = RiemannHandler(
+            "127.0.0.1", 5555, message_type="test", level=logbook.INFO
+        )
         null_handler = logbook.NullHandler()
         with null_handler.applicationbound():
             with riemann_handler:
@@ -189,17 +207,21 @@ def test_happy_path(self, logger):
     @require_module("riemann_client")
     def test_incorrect_type(self):
         from logbook.more import RiemannHandler
+
         with pytest.raises(RuntimeError):
             RiemannHandler("127.0.0.1", 5555, message_type="fancy_type")

     @require_module("riemann_client")
     def test_flush(self, logger):
         from logbook.more import RiemannHandler
-        riemann_handler = RiemannHandler("127.0.0.1",
-                                         5555,
-                                         message_type="test",
-                                         flush_threshold=2,
-                                         level=logbook.INFO)
+
+        riemann_handler = RiemannHandler(
+            "127.0.0.1",
+            5555,
+            message_type="test",
+            flush_threshold=2,
+            level=logbook.INFO,
+        )
         null_handler = logbook.NullHandler()
         with null_handler.applicationbound():
             with riemann_handler:
diff --git a/tests/test_nteventlog_handler.py b/tests/test_nteventlog_handler.py
index 9c27beb9..2616a7fc 100644
--- a/tests/test_nteventlog_handler.py
+++ b/tests/test_nteventlog_handler.py
@@ -1,52 +1,60 @@
 import os

-import logbook
 import pytest

+import logbook
+
 from .utils import require_module


-@require_module('win32con')
-@require_module('win32evtlog')
-@require_module('win32evtlogutil')
-@pytest.mark.skipif(os.environ.get('ENABLE_LOGBOOK_NTEVENTLOG_TESTS') is None,
-                    reason="Don't clutter NT Event Log unless enabled.")
+@require_module("win32con")
+@require_module("win32evtlog")
+@require_module("win32evtlogutil")
+@pytest.mark.skipif(
+    os.environ.get("ENABLE_LOGBOOK_NTEVENTLOG_TESTS") is None,
+    reason="Don't clutter NT Event Log unless enabled.",
+)
 def test_nteventlog_handler():
     from win32con import (
-        EVENTLOG_ERROR_TYPE, EVENTLOG_INFORMATION_TYPE, EVENTLOG_WARNING_TYPE)
+        EVENTLOG_ERROR_TYPE,
+        EVENTLOG_INFORMATION_TYPE,
+        EVENTLOG_WARNING_TYPE,
+    )
     from win32evtlog import (
-        EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ, OpenEventLog,
-        ReadEventLog)
+        EVENTLOG_BACKWARDS_READ,
+        EVENTLOG_SEQUENTIAL_READ,
+        OpenEventLog,
+        ReadEventLog,
+    )
     from win32evtlogutil import SafeFormatMessage

-    logger = logbook.Logger('Test Logger')
+    logger = logbook.Logger("Test Logger")

-    with logbook.NTEventLogHandler('Logbook Test Suite'):
-        logger.info('The info log message.')
-        logger.warning('The warning log message.')
-        logger.error('The error log message.')
+    with logbook.NTEventLogHandler("Logbook Test Suite"):
+        logger.info("The info log message.")
+        logger.warning("The warning log message.")
+        logger.error("The error log message.")

     def iter_event_log(handle, flags, offset):
         while True:
             events = ReadEventLog(handle, flags, offset)
-            for event in events:
-                yield event
+            yield from events
             if not events:
                 break

-    handle = OpenEventLog(None, 'Application')
+    handle = OpenEventLog(None, "Application")
     flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

     for event in iter_event_log(handle, flags, 0):
         source = str(event.SourceName)
-        if source == 'Logbook Test Suite':
-            message = SafeFormatMessage(event, 'Application')
-            if 'Message Level: INFO' in message:
-                assert 'The info log message' in message
+        if source == "Logbook Test Suite":
+            message = SafeFormatMessage(event, "Application")
+            if "Message Level: INFO" in message:
+                assert "The info log message" in message
                 assert event.EventType == EVENTLOG_INFORMATION_TYPE
-            if 'Message Level: WARNING' in message:
-                assert 'The warning log message' in message
+            if "Message Level: WARNING" in message:
+                assert "The warning log message" in message
                 assert event.EventType == EVENTLOG_WARNING_TYPE
-            if 'Message Level: ERROR' in message:
-                assert 'The error log message' in message
+            if "Message Level: ERROR" in message:
+                assert "The error log message" in message
                 assert event.EventType == EVENTLOG_ERROR_TYPE
diff --git a/tests/test_null_handler.py b/tests/test_null_handler.py
index c84dc7d5..ac120cbb 100644
--- a/tests/test_null_handler.py
+++ b/tests/test_null_handler.py
@@ -6,31 +6,33 @@ def test_null_handler(activation_strategy, logger):
     with capturing_stderr_context() as captured:
         with activation_strategy(logbook.NullHandler()):
-            with activation_strategy(logbook.TestHandler(level='ERROR')) as handler:
-                logger.error('An error')
-                logger.warn('A warning')
-    assert captured.getvalue() == ''
-    assert (not handler.has_warning('A warning'))
-    assert handler.has_error('An error')
+            with activation_strategy(logbook.TestHandler(level="ERROR")) as handler:
+                logger.error("An error")
+                logger.warn("A warning")
+    assert captured.getvalue() == ""
+    assert not handler.has_warning("A warning")
+    assert handler.has_error("An error")


 def test_blackhole_setting(activation_strategy):
     null_handler = logbook.NullHandler()
     heavy_init = logbook.LogRecord.heavy_init
     with activation_strategy(null_handler):
+
         def new_heavy_init(self):
-            raise RuntimeError('should not be triggered')
+            raise RuntimeError("should not be triggered")
+
         logbook.LogRecord.heavy_init = new_heavy_init
         try:
             with activation_strategy(null_handler):
-                logbook.warn('Awesome')
+                logbook.warn("Awesome")
         finally:
             logbook.LogRecord.heavy_init = heavy_init

     null_handler.bubble = True
     with capturing_stderr_context() as captured:
-        logbook.warning('Not a blockhole')
-        assert captured.getvalue() != ''
+        logbook.warning("Not a blackhole")
+        assert captured.getvalue() != ""


 def test_null_handler_filtering(activation_strategy):
@@ -46,5 +48,5 @@ def test_null_handler_filtering(activation_strategy):
         logger1.warn("1")
         logger2.warn("2")

-    assert outer.has_warning('2', channel='2')
-    assert (not outer.has_warning('1', channel='1'))
+    assert outer.has_warning("2", channel="2")
+    assert not outer.has_warning("1", channel="1")
diff --git a/tests/test_processors.py b/tests/test_processors.py
index daaf92a3..0aa55321 100644
--- a/tests/test_processors.py
+++ b/tests/test_processors.py
@@ -7,7 +7,8 @@

 def test_handler_filter_after_processor(activation_strategy, logger):
     handler = make_fake_mail_handler(
-        format_string=dedent('''
+        format_string=dedent(
+            """
         Subject: Application Error for {record.extra[path]} [{record.extra[method]}]

         Message type: {record.level_name}
@@ -21,20 +22,22 @@ def test_handler_filter_after_processor(activation_strategy, logger):

         Message:

         {record.message}
-        ''').lstrip(),
-        filter=lambda r, h: 'ip' in r.extra,
-        bubble=False)
+        """
+        ).lstrip(),
+        filter=lambda r, h: "ip" in r.extra,
+        bubble=False,
+    )

-    class Request(object):
-        remote_addr = '127.0.0.1'
-        method = 'GET'
-        path = '/index.html'
+    class Request:
+        remote_addr = "127.0.0.1"
+        method = "GET"
+        path = "/index.html"

     def handle_request(request):
         def inject_extra(record):
-            record.extra['ip'] = request.remote_addr
-            record.extra['method'] = request.method
-            record.extra['path'] = request.path
+            record.extra["ip"] = request.remote_addr
+            record.extra["method"] = request.method
+            record.extra["path"] = request.path

         processor = logbook.Processor(inject_extra)
         with activation_strategy(processor):
@@ -43,20 +46,21 @@ def inject_extra(record):
             try:
                 1 / 0
             except Exception:
-                logger.exception('Exception happened during request')
+                logger.exception("Exception happened during request")
             finally:
                 handler.pop_thread()

     handle_request(Request())
     assert len(handler.mails) == 1
     mail = handler.mails[0][2]
-    assert 'Subject: Application Error for /index.html [GET]' in mail
-    assert '1 / 0' in mail
+    assert "Subject: Application Error for /index.html [GET]" in mail
+    assert "1 / 0" in mail


 def test_handler_processors(activation_strategy, logger):
     handler = make_fake_mail_handler(
-        format_string=dedent('''
+        format_string=dedent(
+            """
         Subject: Application Error for {record.extra[path]} [{record.extra[method]}]

         Message type: {record.level_name}
@@ -70,18 +74,20 @@

         Message:

         {record.message}
-        ''').lstrip())
+        """
+        ).lstrip()
+    )

-    class Request(object):
-        remote_addr = '127.0.0.1'
-        method = 'GET'
-        path = '/index.html'
+    class Request:
+        remote_addr = "127.0.0.1"
+        method = "GET"
+        path = "/index.html"

     def handle_request(request):
         def inject_extra(record):
-            record.extra['ip'] = request.remote_addr
-            record.extra['method'] = request.method
-            record.extra['path'] = request.path
+            record.extra["ip"] = request.remote_addr
+            record.extra["method"] = request.method
+            record.extra["path"] = request.path

         processor = logbook.Processor(inject_extra)
         with activation_strategy(processor):
@@ -90,12 +96,12 @@ def inject_extra(record):
             try:
                 1 / 0
             except Exception:
-                logger.exception('Exception happened during request')
+                logger.exception("Exception happened during request")
             finally:
                 handler.pop_thread()

     handle_request(Request())
     assert len(handler.mails) == 1
     mail = handler.mails[0][2]
-    assert 'Subject: Application Error for /index.html [GET]' in mail
-    assert '1 / 0' in mail
+    assert "Subject: Application Error for /index.html [GET]" in mail
+    assert "1 / 0" in mail
diff --git a/tests/test_queues.py b/tests/test_queues.py
index 63bf0157..80bf4972 100644
--- a/tests/test_queues.py
+++ b/tests/test_queues.py
@@ -1,22 +1,23 @@
-# -*- coding: utf-8 -*-
 import os
 import socket
 import time

-from .utils import require_module, missing, LETTERS
+import pytest

 import logbook
-from logbook.helpers import u

-import pytest
+from .utils import LETTERS, missing, require_module
+
+REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
+REDIS_PORT = int(os.environ.get("REDIS_PORT", "6379"))


-@require_module('zmq')
+@require_module("zmq")
 def test_zeromq_handler(logger, handlers, subscriber):
     tests = [
-        u('Logging something'),
-        u('Something with umlauts äöü'),
-        u('Something else for good measure'),
+        "Logging something",
+        "Something with umlauts äöü",
+        "Something else for good measure",
     ]
     for test in tests:
         for handler in handlers:
@@ -27,15 +28,15 @@ def test_zeromq_handler(logger, handlers, subscriber):
             assert record.channel == logger.name


-@require_module('zmq')
+@require_module("zmq")
 def test_zeromq_background_thread(logger, handlers, subscriber):
     test_handler = logbook.TestHandler()
     controller = subscriber.dispatch_in_background(test_handler)

     for handler in handlers:
         with handler:
-            logger.warn('This is a warning')
-            logger.error('This is an error')
+            logger.warn("This is a warning")
+            logger.error("This is an error")

     # stop the controller. This will also stop the loop and join the
     # background process. Before that we give it a fraction of a second
@@ -43,39 +44,43 @@
     time.sleep(0.5)
     controller.stop()

-    assert test_handler.has_warning('This is a warning')
-    assert test_handler.has_error('This is an error')
+    assert test_handler.has_warning("This is a warning")
+    assert test_handler.has_error("This is an error")


-@missing('zmq')
+@missing("zmq")
 def test_missing_zeromq():
     from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
+
     with pytest.raises(RuntimeError):
-        ZeroMQHandler('tcp://127.0.0.1:42000')
+        ZeroMQHandler("tcp://127.0.0.1:42000")
     with pytest.raises(RuntimeError):
-        ZeroMQSubscriber('tcp://127.0.0.1:42000')
+        ZeroMQSubscriber("tcp://127.0.0.1:42000")


-class MultiProcessingHandlerSendBack(object):
+class MultiProcessingHandlerSendBack:
     def __init__(self, queue):
         self.queue = queue

     def __call__(self):
         from logbook.queues import MultiProcessingHandler
+
         handler = MultiProcessingHandler(self.queue)
         handler.push_thread()
         try:
-            logbook.warn('Hello World')
+            logbook.warn("Hello World")
         finally:
             handler.pop_thread()


-@require_module('multiprocessing')
+@require_module("multiprocessing")
 def test_multi_processing_handler():
-    if os.getenv('APPVEYOR') == 'True':
-        pytest.skip('Test hangs on AppVeyor CI')
+    if os.getenv("APPVEYOR") == "True":
+        pytest.skip("Test hangs on AppVeyor CI")
     from multiprocessing import Process, Queue
+
     from logbook.queues import MultiProcessingSubscriber
+
     queue = Queue(-1)
     test_handler = logbook.TestHandler()
     subscriber = MultiProcessingSubscriber(queue)
@@ -86,146 +91,158 @@
     with test_handler:
         subscriber.dispatch_once()
-        assert test_handler.has_warning('Hello World')
+        assert test_handler.has_warning("Hello World")


 class BatchTestHandler(logbook.TestHandler):
     def __init__(self, *args, **kwargs):
-        super(BatchTestHandler, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self.batches = []

     def emit(self, record):
-        super(BatchTestHandler, self).emit(record)
+        super().emit(record)
         self.batches.append([record])

     def emit_batch(self, records, reason):
         for record in records:
-            super(BatchTestHandler, self).emit(record)
+            super().emit(record)
         self.batches.append(records)


 def test_threaded_wrapper_handler(logger):
     from logbook.queues import ThreadedWrapperHandler
+
     test_handler = BatchTestHandler()
     with ThreadedWrapperHandler(test_handler) as handler:
-        logger.warn('Just testing')
-        logger.error('More testing')
+        logger.warn("Just testing")
+        logger.error("More testing")

     # give it some time to sync up
     handler.close()

-    assert (not handler.controller.running)
+    assert not handler.controller.running
     assert len(test_handler.records) == 2
     assert len(test_handler.batches) == 2
-    assert all((len(records) == 1 for records in test_handler.batches))
-    assert test_handler.has_warning('Just testing')
-    assert test_handler.has_error('More testing')
+    assert all(len(records) == 1 for records in test_handler.batches)
+    assert test_handler.has_warning("Just testing")
+    assert test_handler.has_error("More testing")


 def test_threaded_wrapper_handler_emit():
     from logbook.queues import ThreadedWrapperHandler
+
     test_handler = BatchTestHandler()
     with ThreadedWrapperHandler(test_handler) as handler:
-        lr = logbook.LogRecord('Test Logger', logbook.WARNING, 'Just testing')
+        lr = logbook.LogRecord("Test Logger", logbook.WARNING, "Just testing")
         test_handler.emit(lr)
-        lr = logbook.LogRecord('Test Logger', logbook.ERROR, 'More testing')
+        lr = logbook.LogRecord("Test Logger", logbook.ERROR, "More testing")
         test_handler.emit(lr)

     # give it some time to sync up
     handler.close()

-    assert (not handler.controller.running)
+    assert not handler.controller.running
     assert len(test_handler.records) == 2
     assert len(test_handler.batches) == 2
-    assert all((len(records) == 1 for records in test_handler.batches))
-    assert test_handler.has_warning('Just testing')
-    assert test_handler.has_error('More testing')
+    assert all(len(records) == 1 for records in test_handler.batches)
+    assert test_handler.has_warning("Just testing")
+    assert test_handler.has_error("More testing")


 def test_threaded_wrapper_handler_emit_batched():
     from logbook.queues import ThreadedWrapperHandler
+
     test_handler = BatchTestHandler()
     with ThreadedWrapperHandler(test_handler) as handler:
-        test_handler.emit_batch([
-            logbook.LogRecord('Test Logger', logbook.WARNING, 'Just testing'),
-            logbook.LogRecord('Test Logger', logbook.ERROR, 'More testing'),
-        ], 'group')
+        test_handler.emit_batch(
+            [
+                logbook.LogRecord("Test Logger", logbook.WARNING, "Just testing"),
+                logbook.LogRecord("Test Logger", logbook.ERROR, "More testing"),
+            ],
+            "group",
+        )

     # give it some time to sync up
     handler.close()

-    assert (not handler.controller.running)
+    assert not handler.controller.running
     assert len(test_handler.records) == 2
     assert len(test_handler.batches) == 1
-    (records, ) = test_handler.batches
+    (records,) = test_handler.batches
     assert len(records) == 2
-    assert test_handler.has_warning('Just testing')
-    assert test_handler.has_error('More testing')
+    assert test_handler.has_warning("Just testing")
+    assert test_handler.has_error("More testing")


-@require_module('execnet')
+@require_module("execnet")
 def test_execnet_handler():
     def run_on_remote(channel):
         import logbook
         from logbook.queues import ExecnetChannelHandler
+
         handler = ExecnetChannelHandler(channel)
-        log = logbook.Logger('Execnet')
+        log = logbook.Logger("Execnet")
         handler.push_application()
-        log.info('Execnet works')
+        log.info("Execnet works")

     import execnet
+
     gw = execnet.makegateway()
     channel = gw.remote_exec(run_on_remote)
     from logbook.queues import ExecnetChannelSubscriber
+
     subscriber = ExecnetChannelSubscriber(channel)
     record = subscriber.recv()
-    assert record.msg == 'Execnet works'
+    assert record.msg == "Execnet works"
     gw.exit()


-class SubscriberGroupSendBack(object):
+class SubscriberGroupSendBack:
     def __init__(self, message, queue):
         self.message = message
         self.queue = queue

     def __call__(self):
         from logbook.queues import MultiProcessingHandler
+
         with MultiProcessingHandler(self.queue):
             logbook.warn(self.message)


-@require_module('multiprocessing')
+@require_module("multiprocessing")
 def test_subscriber_group():
-    if os.getenv('APPVEYOR') == 'True':
-        pytest.skip('Test hangs on AppVeyor CI')
+    if os.getenv("APPVEYOR") == "True":
+        pytest.skip("Test hangs on AppVeyor CI")
     from multiprocessing import Process, Queue
+
     from logbook.queues import MultiProcessingSubscriber, SubscriberGroup
+
     a_queue = Queue(-1)
     b_queue = Queue(-1)
-    subscriber = SubscriberGroup([
-        MultiProcessingSubscriber(a_queue),
-        MultiProcessingSubscriber(b_queue)
-    ])
+    subscriber = SubscriberGroup(
+        [MultiProcessingSubscriber(a_queue), MultiProcessingSubscriber(b_queue)]
+    )

     for _ in range(10):
-        p1 = Process(target=SubscriberGroupSendBack('foo', a_queue))
-        p2 = Process(target=SubscriberGroupSendBack('bar', b_queue))
+        p1 = Process(target=SubscriberGroupSendBack("foo", a_queue))
+        p2 = Process(target=SubscriberGroupSendBack("bar", b_queue))
         p1.start()
         p2.start()
         p1.join()
         p2.join()
         messages = [subscriber.recv().message for i in (1, 2)]
-        assert sorted(messages) == ['bar', 'foo']
+        assert sorted(messages) == ["bar", "foo"]


-@require_module('redis')
+@require_module("redis")
 def test_redis_handler():
     import redis
+
     from logbook.queues import RedisHandler

-    KEY = 'redis-{}'.format(os.getpid())
-    FIELDS = ['message', 'host']
-    r = redis.Redis(decode_responses=True)
+    KEY = f"redis-{os.getpid()}"
+    FIELDS = ["message", "host"]
+    r = redis.Redis(REDIS_HOST, REDIS_PORT, decode_responses=True)
     redis_handler = RedisHandler(key=KEY, level=logbook.INFO, bubble=True)
     # We don't want output for the tests, so we can wrap everything in a
     # NullHandler
@@ -244,7 +261,7 @@
         assert message.find(LETTERS)

     # Change the key of the handler and check on redis
-    KEY = 'test_another_key-{}'.format(os.getpid())
+    KEY = f"test_another_key-{os.getpid()}"
     redis_handler.key = KEY

     with null_handler.applicationbound():
@@ -255,11 +272,12 @@
         assert key == KEY

     # Check that extra fields are added if specified when creating the handler
-    FIELDS.append('type')
-    extra_fields = {'type': 'test'}
-    del(redis_handler)
-    redis_handler = RedisHandler(key=KEY, level=logbook.INFO,
-                                 extra_fields=extra_fields, bubble=True)
+    FIELDS.append("type")
+    extra_fields = {"type": "test"}
+    del redis_handler
+    redis_handler = RedisHandler(
+        key=KEY, level=logbook.INFO, extra_fields=extra_fields, bubble=True
+    )

     with null_handler.applicationbound():
         with redis_handler:
@@ -268,34 +286,37 @@
         key, message = r.blpop(KEY)
         for field in FIELDS:
             assert message.find(field)
-        assert message.find('test')
+        assert message.find("test")

     # And finally, check that fields are correctly added if appended to the
     # log message
-    FIELDS.append('more_info')
+    FIELDS.append("more_info")

     with null_handler.applicationbound():
         with redis_handler:
-            logbook.info(LETTERS, more_info='This works')
+            logbook.info(LETTERS, more_info="This works")

         key, message = r.blpop(KEY)
         for field in FIELDS:
             assert message.find(field)
-        assert message.find('This works')
+        assert message.find("This works")


-@require_module('redis')
+@require_module("redis")
 def test_redis_handler_lpush():
     """
     Test if lpush stores messages in the right order
     new items should be first on list
     """
     import redis
+
     from logbook.queues import RedisHandler
+
     null_handler = logbook.NullHandler()

-    KEY = 'lpushed-'.format(os.getpid())
-    redis_handler = RedisHandler(key=KEY, push_method='lpush',
-                                 level=logbook.INFO, bubble=True)
+    KEY = f"lpushed-{os.getpid()}"
+    redis_handler = RedisHandler(
+        key=KEY, push_method="lpush", level=logbook.INFO, bubble=True
+    )

     with null_handler.applicationbound():
         with redis_handler:
@@ -304,26 +325,29 @@
     time.sleep(1.5)

-    r = redis.Redis(decode_responses=True)
+    r = redis.Redis(REDIS_HOST, REDIS_PORT, decode_responses=True)
     logs = r.lrange(KEY, 0, -1)
     assert logs
     assert "new item" in logs[0]
     r.delete(KEY)


-@require_module('redis')
+@require_module("redis")
 def test_redis_handler_rpush():
     """
     Test if rpush stores messages in the right order
     old items should be first on list
     """
     import redis
+
     from logbook.queues import RedisHandler
+
     null_handler = logbook.NullHandler()

-    KEY = 'rpushed-' + str(os.getpid())
-    redis_handler = RedisHandler(key=KEY, push_method='rpush',
-                                 level=logbook.INFO, bubble=True)
+    KEY = "rpushed-" + str(os.getpid())
+    redis_handler = RedisHandler(
+        key=KEY, push_method="rpush", level=logbook.INFO, bubble=True
+    )

     with null_handler.applicationbound():
         with redis_handler:
@@ -332,7 +356,7 @@
     time.sleep(1.5)

-    r = redis.Redis(decode_responses=True)
+    r = redis.Redis(REDIS_HOST, REDIS_PORT, decode_responses=True)
     logs = r.lrange(KEY, 0, -1)
     assert logs
     assert "old item" in logs[0]
@@ -355,12 +379,12 @@ def handlers_subscriber(multi):
     # Get an unused port
     tempsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    tempsock.bind(('127.0.0.1', 0))
+    tempsock.bind(("127.0.0.1", 0))
     host, unused_port = tempsock.getsockname()
     tempsock.close()

     # Retrieve the ZeroMQ handler and subscriber
-    uri = 'tcp://%s:%d' % (host, unused_port)
+    uri = "tcp://%s:%d" % (host, unused_port)
     if multi:
         handlers = [ZeroMQHandler(uri, multi=True) for _ in range(3)]
     else:
@@ -371,6 +395,6 @@
     return handlers, subscriber


-@pytest.fixture(params=[True, False])
+@pytest.fixture(params=[True, False], ids=["multi", "nomulti"])
 def multi(request):
     return request.param
diff --git a/tests/test_speedups.py b/tests/test_speedups.py
new file mode 100644
index 00000000..c502df97
--- /dev/null
+++ b/tests/test_speedups.py
@@ -0,0 +1,32 @@
+import importlib
+
+import pytest
+
+
+@pytest.fixture(params=["speedups", "fallback"])
+def speedups_module(request):
+    mod_name = f"logbook._{request.param}"
+    try:
+        return importlib.import_module(mod_name)
+    except ImportError:
+        pytest.skip(f"{mod_name} is not available")
+
+
+def test_group_reflected_property(speedups_module):
+    class Group:
+        foo = "group"
+
+    class A:
+        foo = speedups_module.group_reflected_property("foo", "default")
+
+        def __init__(self, group=None):
+            self.group = group
+
+    a = A()
+    assert a.foo == "default"
+    a.group = Group()
+    assert a.foo == "group"
+    a.foo = "set"
+    assert a.foo == "set"
+    del a.foo
+    assert a.foo == "group"
diff --git a/tests/test_syslog_handler.py b/tests/test_syslog_handler.py
index 99447ef5..d8cabc71 100644
--- a/tests/test_syslog_handler.py
+++ b/tests/test_syslog_handler.py
@@ -3,31 +3,32 @@
 import socket
 from contextlib import closing

-import logbook
 import pytest

+import logbook
+
 UNIX_SOCKET = "/tmp/__unixsock_logbook.test"

-DELIMITERS = {
-    socket.AF_INET: '\n'
-}
+DELIMITERS = {socket.AF_INET: "\n"}

 TO_TEST = [
-    (socket.AF_INET, socket.SOCK_DGRAM, ('127.0.0.1', 0)),
-    (socket.AF_INET, socket.SOCK_STREAM, ('127.0.0.1', 0)),
+    (socket.AF_INET, socket.SOCK_DGRAM, ("127.0.0.1", 0)),
+    (socket.AF_INET, socket.SOCK_STREAM, ("127.0.0.1", 0)),
 ]

-UNIX_SOCKET_AVAILABLE = hasattr(socket, 'AF_UNIX')
+UNIX_SOCKET_AVAILABLE = hasattr(socket, "AF_UNIX")
 if UNIX_SOCKET_AVAILABLE:
-    DELIMITERS[socket.AF_UNIX] = '\x00'
+    DELIMITERS[socket.AF_UNIX] = "\x00"
     TO_TEST.append((socket.AF_UNIX, socket.SOCK_DGRAM, UNIX_SOCKET))


 @pytest.mark.usefixtures("unix_sock_path")
 @pytest.mark.parametrize("sock_family,socktype,address", TO_TEST)
-@pytest.mark.parametrize("app_name", [None, 'Testing'])
-def test_syslog_handler(logger, activation_strategy, sock_family, socktype, address, app_name):
+@pytest.mark.parametrize("app_name", [None, "Testing"])
+def test_syslog_handler(
+    logger, activation_strategy, sock_family, socktype, address, app_name
+):
     delimiter = DELIMITERS[sock_family]
     with closing(socket.socket(sock_family, socktype)) as inc:
         inc.bind(address)
@@ -38,18 +39,25 @@ def test_syslog_handler(logger, activation_strategy, sock_family, socktype, addr
         inc.settimeout(1)

         if UNIX_SOCKET_AVAILABLE and sock_family == socket.AF_UNIX:
-            expected = (r'^<12>%stestlogger: Syslog is weird%s$' % (app_name + ':' if app_name else '', delimiter))
+            expected = r"^<12>{}testlogger: Syslog is weird{}$".format(
+                app_name + ":" if app_name else "", delimiter
+            )
         else:
-            expected = (r'^<12>1 \d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z %s %s %d - - %sSyslog is weird%s$' % (
-                socket.gethostname(),
-                app_name if app_name else 'testlogger',
-                os.getpid(), 'testlogger: ' if app_name else '',
-                delimiter))
+            expected = (
+                r"^<12>1 \d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z %s %s %d - - %sSyslog is weird%s$"
+                % (
+                    socket.gethostname(),
+                    app_name if app_name else "testlogger",
+                    os.getpid(),
+                    "testlogger: " if app_name else "",
+                    delimiter,
+                )
+            )

         handler = logbook.SyslogHandler(app_name, inc.getsockname(), socktype=socktype)
         with activation_strategy(handler):
-            logger.warn('Syslog is weird')
+            logger.warn("Syslog is weird")

         if socktype == socket.SOCK_STREAM:
             with closing(inc.accept()[0]) as inc2:
@@ -57,9 +65,8 @@
         else:
             rv = inc.recvfrom(1024)[0]

-        rv = rv.decode('utf-8')
-        assert re.match(expected, rv), \
-            'expected {}, got {}'.format(expected, rv)
+        rv = rv.decode("utf-8")
+        assert re.match(expected, rv), f"expected {expected}, got {rv}"


 @pytest.fixture
diff --git a/tests/test_test_handler.py b/tests/test_test_handler.py
index 4d651faa..fed825fb 100644
--- a/tests/test_test_handler.py
+++ b/tests/test_test_handler.py
@@ -3,46 +3,52 @@
 import pytest


-@pytest.mark.parametrize("level, method", [
-    ("trace", "has_traces"),
-    ("debug", "has_debugs"),
-    ("info", "has_infos"),
-    ("notice", "has_notices"),
-    ("warning", "has_warnings"),
-    ("error", "has_errors"),
-    ("critical", "has_criticals"),
-])
+@pytest.mark.parametrize(
+    "level, method",
+    [
+        ("trace", "has_traces"),
+        ("debug", "has_debugs"),
+        ("info", "has_infos"),
+        ("notice", "has_notices"),
+        ("warning", "has_warnings"),
+        ("error", "has_errors"),
+        ("critical", "has_criticals"),
+    ],
+)
 def test_has_level(active_handler, logger, level, method):
     log = getattr(logger, level)
-    log('Hello World')
+    log("Hello World")
     assert getattr(active_handler, method)


-@pytest.mark.parametrize("level, method", [
-    ("trace", "has_trace"),
-    ("debug", "has_debug"),
-    ("info", "has_info"),
-    ("notice", "has_notice"),
-    ("warning", "has_warning"),
-    ("error", "has_error"),
-    ("critical", "has_critical"),
-])
+@pytest.mark.parametrize(
+    "level, method",
+    [
+        ("trace", "has_trace"),
+        ("debug", "has_debug"),
+        ("info", "has_info"),
+        ("notice", "has_notice"),
+        ("warning", "has_warning"),
+        ("error", "has_error"),
+        ("critical", "has_critical"),
+    ],
+)
 def test_regex_matching(active_handler, logger, level, method):
     log = getattr(logger, level)
-    log('Hello World')
+    log("Hello World")
     has_level_method = getattr(active_handler, method)
-    assert has_level_method(re.compile('^Hello'))
-    assert (not has_level_method(re.compile('world$')))
-    assert (not has_level_method('^Hello World'))
+    assert has_level_method(re.compile("^Hello"))
+    assert not has_level_method(re.compile("world$"))
+    assert not has_level_method("^Hello World")


 def test_test_handler_cache(active_handler, logger):
-    logger.warn('First line')
+    logger.warn("First line")
     assert len(active_handler.formatted_records) == 1

     # store cache, to make sure it is identifiable
     cache = active_handler.formatted_records
     assert len(active_handler.formatted_records) == 1
     assert cache is active_handler.formatted_records

-    logger.warn('Second line invalidates cache')
+    logger.warn("Second line invalidates cache")
     assert len(active_handler.formatted_records) == 2
-    assert (cache is not active_handler.formatted_records)
+    assert cache is not active_handler.formatted_records
diff --git a/tests/test_ticketing.py b/tests/test_ticketing.py
index b203cb66..af988f6d 100644
--- a/tests/test_ticketing.py
+++ b/tests/test_ticketing.py
@@ -1,14 +1,7 @@
 import os
-import sys
-
-try:
-    from thread import get_ident
-except ImportError:
-    from _thread import get_ident
+from threading import get_ident

 import logbook
-import pytest
-from logbook.helpers import xrange

 from .utils import require_module

@@ -16,21 +9,18 @@
 if __file_without_pyc__.endswith(".pyc"):
     __file_without_pyc__ = __file_without_pyc__[:-1]

-python_version = sys.version_info[:2]
-

-@pytest.mark.xfail(
-    os.name == 'nt' and (python_version == (3, 2) or python_version == (3, 3)),
-    reason='Problem with in-memory sqlite on Python 3.2, 3.3 and Windows')
-@require_module('sqlalchemy')
+@require_module("sqlalchemy")
 def test_basic_ticketing(logger):
-    from logbook.ticketing import TicketingHandler
     from time import sleep
-    with TicketingHandler('sqlite:///') as handler:
-        for x in xrange(5):
-            logger.warn('A warning')
+
+    from logbook.ticketing import TicketingHandler
+
+    with TicketingHandler("sqlite:///") as handler:
+        for x in range(5):
+            logger.warn("A warning")
             sleep(0.2)
-            logger.info('An error')
+            logger.info("An error")
             sleep(0.2)
             if x < 2:
                 try:
@@ -56,15 +46,14 @@
     ticket = handler.db.get_ticket(tickets[1].ticket_id)
     assert ticket == tickets[1]

-    occurrences = handler.db.get_occurrences(tickets[2].ticket_id,
-                                             order_by='time')
+    occurrences = handler.db.get_occurrences(tickets[2].ticket_id, order_by="time")
     assert len(occurrences) == 2
     record = occurrences[0]
     assert __file_without_pyc__ in record.filename
     # avoid 2to3 destroying our assertion
-    assert getattr(record, 'func_name') == 'test_basic_ticketing'
+    assert getattr(record, "func_name") == "test_basic_ticketing"
     assert record.level == logbook.ERROR
     assert record.thread == get_ident()
     assert record.process == os.getpid()
-    assert record.channel == 'testlogger'
-    assert '1 / 0' in record.formatted_exception
+    assert record.channel == "testlogger"
+    assert "1 / 0" in record.formatted_exception
diff --git a/tests/test_unicode.py b/tests/test_unicode.py
index 96ff00c0..3a503c73 100644
--- a/tests/test_unicode.py
+++ b/tests/test_unicode.py
@@ -1,63 +1,53 @@
-# -*- coding: utf-8 -*-
-from .utils import require_py3, capturing_stderr_context
-
 import logbook

+from .utils import capturing_stderr_context
+

-@require_py3
 def test_default_format_unicode(logger):
     with capturing_stderr_context() as stream:
-        logger.warn('\u2603')
-        assert 'WARNING: testlogger: \u2603' in stream.getvalue()
+        logger.warn("\u2603")
+        assert "WARNING: testlogger: \u2603" in stream.getvalue()


-@require_py3
 def test_default_format_encoded(logger):
     with capturing_stderr_context() as stream:
         # it's a string but it's in the right encoding so don't barf
-        logger.warn('\u2603')
-        assert 'WARNING: testlogger: \u2603' in stream.getvalue()
+        logger.warn("\u2603")
+        assert "WARNING: testlogger: \u2603" in stream.getvalue()


-@require_py3
 def test_default_format_bad_encoding(logger):
     with capturing_stderr_context() as stream:
         # it's a string, is wrong, but just dump it in the logger,
         # don't try to decode/encode it
-        logger.warn('Русский'.encode('koi8-r'))
+        logger.warn("Русский".encode("koi8-r"))
         expected = "WARNING: testlogger: b'\\xf2\\xd5\\xd3\\xd3\\xcb\\xc9\\xca'"
         assert expected in stream.getvalue()


-@require_py3
 def test_custom_unicode_format_unicode(logger):
-    format_string = ('[{record.level_name}] '
-                     '{record.channel}: {record.message}')
+    format_string = "[{record.level_name}] {record.channel}: {record.message}"
     with capturing_stderr_context() as stream:
         with logbook.StderrHandler(format_string=format_string):
             logger.warn("\u2603")
-        assert '[WARNING] testlogger: \u2603' in stream.getvalue()
+        assert "[WARNING] testlogger: \u2603" in stream.getvalue()


-@require_py3
 def test_custom_string_format_unicode(logger):
-    format_string = ('[{record.level_name}] '
-                     '{record.channel}: {record.message}')
+    format_string = "[{record.level_name}] {record.channel}: {record.message}"
     with capturing_stderr_context() as stream:
         with logbook.StderrHandler(format_string=format_string):
-            logger.warn('\u2603')
-        assert '[WARNING] testlogger: \u2603' in stream.getvalue()
+            logger.warn("\u2603")
+        assert "[WARNING] testlogger: \u2603" in stream.getvalue()


-@require_py3
 def test_unicode_message_encoded_params(logger):
     with capturing_stderr_context() as stream:
-        logger.warn("\u2603 {0}", "\u2603".encode('utf8'))
+        logger.warn("\u2603 {0}", "\u2603".encode())
         assert "WARNING: testlogger: \u2603 b'\\xe2\\x98\\x83'" in stream.getvalue()


-@require_py3
 def test_encoded_message_unicode_params(logger):
     with capturing_stderr_context() as stream:
-        logger.warn('\u2603 {0}'.encode('utf8'), '\u2603')
-        assert 'WARNING: testlogger: \u2603 \u2603' in stream.getvalue()
+        logger.warn("\u2603 {0}".encode(), "\u2603")
+        assert "WARNING: testlogger: \u2603 \u2603" in stream.getvalue()
diff --git a/tests/test_utils.py b/tests/test_utils.py
index f4ca5b82..04453c52 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,64 +1,68 @@
+from time import sleep
+from unittest.mock import Mock, call
+
 import pytest

-import logbook
+import logbook
 from logbook.utils import (
-    logged_if_slow, deprecated, forget_deprecation_locations,
-    suppressed_deprecations, log_deprecation_message)
-from time import sleep
+    deprecated,
+    forget_deprecation_locations,
+    log_deprecation_message,
+    logged_if_slow,
+    suppressed_deprecations,
+)

 _THRESHOLD = 0.1

-try:
-    from unittest.mock import Mock, call
-except ImportError:
-    from mock import Mock, call
-

+@pytest.mark.flaky(reruns=5)
 def test_logged_if_slow_reached(test_handler):
     with test_handler.applicationbound():
-        with logged_if_slow('checking...', threshold=_THRESHOLD):
+        with logged_if_slow("checking...", threshold=_THRESHOLD):
             sleep(2 * _THRESHOLD)

         assert len(test_handler.records) == 1
         [record] = test_handler.records
-        assert record.message == 'checking...'
+        assert record.message == "checking..."


+@pytest.mark.flaky(reruns=5)
 def test_logged_if_slow_did_not_reached(test_handler):
     with test_handler.applicationbound():
-        with logged_if_slow('checking...', threshold=_THRESHOLD):
+        with logged_if_slow("checking...", threshold=_THRESHOLD):
             sleep(_THRESHOLD / 2)

         assert len(test_handler.records) == 0


+@pytest.mark.flaky(reruns=5)
 def test_logged_if_slow_logger():
     logger = Mock()

-    with logged_if_slow('checking...', threshold=_THRESHOLD, logger=logger):
+    with logged_if_slow("checking...", threshold=_THRESHOLD, logger=logger):
         sleep(2 * _THRESHOLD)

-    assert logger.log.call_args == call(logbook.DEBUG, 'checking...')
+    assert logger.log.call_args == call(logbook.DEBUG, "checking...")


+@pytest.mark.flaky(reruns=5)
 def test_logged_if_slow_level(test_handler):
     with test_handler.applicationbound():
-        with logged_if_slow('checking...', threshold=_THRESHOLD,
-                            level=logbook.WARNING):
+        with logged_if_slow("checking...", threshold=_THRESHOLD, level=logbook.WARNING):
             sleep(2 * _THRESHOLD)

     assert test_handler.records[0].level == logbook.WARNING


+@pytest.mark.flaky(reruns=5)
 def test_logged_if_slow_deprecated(logger, test_handler):
     with test_handler.applicationbound():
-        with logged_if_slow('checking...', threshold=_THRESHOLD,
-                            func=logbook.error):
+        with logged_if_slow("checking...", threshold=_THRESHOLD, func=logbook.error):
             sleep(2 * _THRESHOLD)

     assert test_handler.records[0].level == logbook.ERROR
-    assert test_handler.records[0].message == 'checking...'
+    assert test_handler.records[0].message == "checking..."

     with pytest.raises(TypeError):
-        logged_if_slow('checking...', logger=logger, func=logger.error)
+        logged_if_slow("checking...", logger=logger, func=logger.error)


 def test_deprecated_func_called(capture):
@@ -70,11 +74,10 @@ def test_deprecation_message(capture):

     [record] = capture.records
     assert "deprecated" in record.message
-    assert 'deprecated_func' in record.message
+    assert "deprecated_func" in record.message


 def test_deprecation_with_message(capture):
-
     @deprecated("use something else instead")
     def func(a, b):
         return a + b
@@ -87,8 +90,7 @@ def func(a, b):

 def test_no_deprecations(capture):
-
-    @deprecated('msg')
+    @deprecated("msg")
     def func(a, b):
         return a + b
@@ -101,12 +103,10 @@
 def _no_decorator(func):
     return func


-@pytest.mark.parametrize('decorator', [_no_decorator, classmethod])
+@pytest.mark.parametrize("decorator", [_no_decorator, classmethod])
 def test_class_deprecation(capture, decorator):
-
-    class Bla(object):
-
-        @deprecated('reason')
+    class Bla:
+        @deprecated("reason")
         @classmethod
         def func(self, a, b):
             assert isinstance(self, Bla)
@@ -115,11 +115,10 @@ def func(self, a, b):

     assert Bla().func(2, 4) == 6

     [record] = capture.records
-    assert 'Bla.func is deprecated' in record.message
+    assert "Bla.func is deprecated" in record.message


 def test_deprecations_different_sources(capture):
-
     def f():
         deprecated_func(1, 2)
@@ -132,7 +131,6 @@ def g():

 def test_deprecations_same_sources(capture):
-
     def f():
         deprecated_func(1, 2)
@@ -142,12 +140,11 @@ def f():

 def test_deprecation_message_different_sources(capture):
-
     def f(flag):
         if flag:
-            log_deprecation_message('first message type')
+            log_deprecation_message("first message type")
         else:
-            log_deprecation_message('second message type')
+            log_deprecation_message("second message type")

     f(True)
     f(False)
@@ -155,12 +152,11 @@ def f(flag):

 def test_deprecation_message_same_sources(capture):
-
     def f(flag):
         if flag:
-            log_deprecation_message('first message type')
+            log_deprecation_message("first message type")
         else:
-            log_deprecation_message('second message type')
+            log_deprecation_message("second message type")

     f(True)
     f(True)
@@ -169,11 +165,12 @@ def f(flag):

 def test_deprecation_message_full_warning(capture):
     def f():
-        log_deprecation_message('some_message')
+        log_deprecation_message("some_message")
+
     f()

     [record] = capture.records
-    assert record.message == 'Deprecation message: some_message'
+    assert record.message == "Deprecation message: some_message"


 def test_name_doc():
@@ -182,39 +179,36 @@ def some_func():
         """docstring here"""
         pass

-    assert some_func.__name__ == 'some_func'
-    assert 'docstring here' in some_func.__doc__
+    assert some_func.__name__ == "some_func"
+    assert "docstring here" in some_func.__doc__


 def test_doc_update():
-    @deprecated('some_message')
+    @deprecated("some_message")
     def some_func():
         """docstring here"""
         pass

-    some_func.__doc__ = 'new_docstring'
+    some_func.__doc__ = "new_docstring"

-    assert 'docstring here' not in some_func.__doc__
-    assert 'new_docstring' in some_func.__doc__
-    assert 'some_message' in some_func.__doc__
+    assert "docstring here" not in some_func.__doc__
+    assert "new_docstring" in some_func.__doc__
+    assert "some_message" in some_func.__doc__


 def test_deprecatd_docstring():
-
     message = "Use something else instead"

     @deprecated()
     def some_func():
-        """This is a function
-        """
+        """This is a function"""

     @deprecated(message)
     def other_func():
-        """This is another function
-        """
+        """This is another function"""

     assert ".. deprecated" in some_func.__doc__
-    assert ".. deprecated\n {0}".format(message) in other_func.__doc__
+    assert f".. deprecated\n {message}" in other_func.__doc__


 @pytest.fixture
@@ -225,6 +219,7 @@ def capture(request):
     @request.addfinalizer
     def pop():
         handler.pop_application()
+
     return handler
diff --git a/tests/utils.py b/tests/utils.py
index d014ccac..277d9be4 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 """
     test utils for logbook
     ~~~~~~~~~~~~~~~~~~~~~~
@@ -7,47 +6,28 @@
     :license: BSD, see LICENSE for more details.
 """
 import functools
-import os
+import importlib
 import sys
 from contextlib import contextmanager
-
-import logbook
-from logbook.helpers import StringIO
+from io import StringIO

 import pytest

+import logbook
+
 _missing = object()

 LETTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"


-def get_total_delta_seconds(delta):
-    """
-    Replacement for datetime.timedelta.total_seconds() for Python 2.5, 2.6
-    and 3.1
-    """
-    return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6
-
-
-require_py3 = pytest.mark.skipif(
-    sys.version_info[0] < 3, reason="Requires Python 3")
-
-appveyor = pytest.mark.skipif(
-    os.environ.get('APPVEYOR') != 'True', reason='AppVeyor CI test')
-
-travis = pytest.mark.skipif(
-    os.environ.get('TRAVIS') != 'true', reason='Travis CI test')
-
-
 def require_module(module_name):
     found = True
     try:
-        __import__(module_name)
+        importlib.import_module(module_name)
     except ImportError:
         found = False

-    return pytest.mark.skipif(
-        not found, reason='Module {0} is required'.format(module_name))
+    return pytest.mark.skipif(not found, reason=f"Module {module_name} is required")


 def make_fake_mail_handler(**kwargs):
@@ -63,8 +43,8 @@ def close_connection(self, con):
     def sendmail(self, fromaddr, recipients, mail):
         self.mails.append((fromaddr, recipients, mail))

-    kwargs.setdefault('level', logbook.ERROR)
-    return FakeMailHandler('foo@example.com', ['bar@example.com'], **kwargs)
+    kwargs.setdefault("level", logbook.ERROR)
+    return FakeMailHandler("foo@example.com", ["bar@example.com"], **kwargs)


 def missing(name):
@@ -80,7 +60,9 @@ def wrapper(*args, **kwargs):
                 del sys.modules[name]
             else:
                 sys.modules[name] = old
+
         return wrapper
+
     return decorate
diff --git a/tox.ini b/tox.ini
index 7c215091..4073795e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,32 +1,34 @@
 [tox]
-envlist = py{27,35,36,37}{,-speedups},pypy,py37-docs
-skipsdist = True
+envlist = py{37,38,39,310,311}{,-nospeedups},pypy,docs

 [testenv]
-whitelist_externals =
-    rm
+extras =
+    all
+    test
 deps =
-    py{27}: mock
-    pytest
-    speedups: Cython
-setenv =
-    !speedups: DISABLE_LOGBOOK_CEXT=1
-    !speedups: DISABLE_LOGBOOK_CEXT_AT_RUNTIME=1
-changedir = {toxinidir}
+    gevent
+set_env =
+    nospeedups: DISABLE_LOGBOOK_CEXT_AT_RUNTIME=1
+pass_env =
+    REDIS_HOST
+    REDIS_PORT
+    ENABLE_LOGBOOK_NTEVENTLOG_TESTS
 commands =
-    {envpython} -m pip install -e {toxinidir}[all]
+    pytest {posargs}

-    # Make sure that speedups are available/not available, as needed.
-    speedups: {envpython} -c "from logbook.base import _has_speedups; exit(0 if _has_speedups else 1)"
-    !speedups: {envpython} -c "from logbook.base import _has_speedups; exit(1 if _has_speedups else 0)"
-
-    {envpython} {toxinidir}/scripts/test_setup.py
-    py.test {toxinidir}/tests
-
-[testenv:py37-docs]
-deps =
-    Sphinx>=1.3
+[testenv:docs]
+basepython = python3.11
+extras =
+    docs
 changedir = docs
 commands =
     sphinx-build -W -b html . _build/html
     sphinx-build -W -b linkcheck . _build/linkcheck
+
+[gh-actions]
+python =
+    3.7: py37
+    3.8: py38
+    3.9: py39
+    3.10: py310
+    3.11: py311, docs
diff --git a/twitter-secrets.txt b/twitter-secrets.txt
deleted file mode 100644
index 2a695b03..00000000
--- a/twitter-secrets.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Leaked Twitter Secrets
-
-Twitter for Android
-    xauth: yes
-    key: 3nVuSoBZnx6U4vzUxf5w
-    secret: Bcs59EFbbsdF6Sl9Ng71smgStWEGwXXKSjYvPVt7qys
-
-Echofon:
-    xauth: yes
-    key: yqoymTNrS9ZDGsBnlFhIuw
-    secret: OMai1whT3sT3XMskI7DZ7xiju5i5rAYJnxSEHaKYvEs