From 67d029c14b3f029842f28d3d02fa17041d42c2f7 Mon Sep 17 00:00:00 2001 From: Jeff Hale Date: Thu, 11 Jan 2024 12:14:06 -0500 Subject: [PATCH] Adds support for Python 3.12 (#11306) Co-authored-by: zangell44 Co-authored-by: Zach Angell <42625717+zangell44@users.noreply.github.com> --- .github/workflows/docker-images.yaml | 1 + .github/workflows/python-tests.yaml | 6 +- .github/workflows/windows-pull-request.yaml | 2 + .github/workflows/windows-tests.yaml | 4 +- Dockerfile | 16 +- docs/concepts/flows.md | 9 +- docs/concepts/tasks.md | 38 +++-- docs/getting-started/installation.md | 13 +- requirements-client.txt | 4 +- setup.cfg | 3 + setup.py | 1 + src/prefect/cli/deployment.py | 4 +- src/prefect/client/schemas/schedules.py | 19 ++- src/prefect/server/database/orm_models.py | 4 +- src/prefect/server/schemas/schedules.py | 26 +++- tests/cli/test_start_server.py | 164 +++++++++++--------- tests/client/test_prefect_client.py | 2 +- tests/fixtures/database.py | 6 +- tests/server/api/test_run_history.py | 30 +++- tests/server/schemas/test_schedules.py | 10 +- tests/server/utilities/test_schemas.py | 2 +- tests/workers/test_base_worker.py | 123 ++++++++++----- versioneer.py | 14 +- 23 files changed, 308 insertions(+), 193 deletions(-) diff --git a/.github/workflows/docker-images.yaml b/.github/workflows/docker-images.yaml index 1d7b75a446e2..c050099d3d5e 100644 --- a/.github/workflows/docker-images.yaml +++ b/.github/workflows/docker-images.yaml @@ -26,6 +26,7 @@ jobs: - "3.9" - "3.10" - "3.11" + - "3.12" steps: - name: Set up QEMU diff --git a/.github/workflows/python-tests.yaml b/.github/workflows/python-tests.yaml index 514e166dcf43..8a350c72218c 100644 --- a/.github/workflows/python-tests.yaml +++ b/.github/workflows/python-tests.yaml @@ -1,7 +1,5 @@ name: Unit tests -# Note: Conda support for 3.11 is pending. See https://github.com/ContinuumIO/anaconda-issues/issues/13082 - env: # enable colored output # https://github.com/pytest-dev/pytest/issues/7443 @@ -61,6 +59,7 @@ jobs: - "3.9" - "3.10" - "3.11" + - "3.12" pytest-options: - "--exclude-services" - "--only-services" @@ -144,8 +143,7 @@ jobs: docker run --rm prefecthq/prefect-dev:${{ steps.get_image_tag.outputs.image_tag }} prefect version - name: Build Conda flavored test image - # Not yet supported for 3.11, see note at top - if: ${{ matrix.build-docker-images && matrix.python-version != '3.11' }} + if: ${{ matrix.build-docker-images }} uses: docker/build-push-action@v5 with: context: . 
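The matrices above now exercise Python 3.9 through 3.12, and the Conda test image is now built for every matrix entry rather than excluding 3.11. Where runtime behavior differs across these interpreters, as it does for `pendulum` later in this patch, the usual guard is an explicit version check. A minimal sketch, not code from this patch:

```python
import sys

# Branch on the interpreter when a dependency differs between Python
# releases, e.g. pendulum 2.x on <= 3.11 versus pendulum 3.x on 3.12.
if sys.version_info >= (3, 12):
    print("running the experimental Python 3.12 configuration")
else:
    print(f"running on Python {sys.version_info[0]}.{sys.version_info[1]}")
```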
diff --git a/.github/workflows/windows-pull-request.yaml b/.github/workflows/windows-pull-request.yaml index f5df38d71833..688f1978ee75 100644 --- a/.github/workflows/windows-pull-request.yaml +++ b/.github/workflows/windows-pull-request.yaml @@ -36,6 +36,8 @@ jobs: python-version: - "3.9" - "3.10" + - "3.11" + - "3.12" fail-fast: false diff --git a/.github/workflows/windows-tests.yaml b/.github/workflows/windows-tests.yaml index 4869ce588ba5..5df4635808f6 100644 --- a/.github/workflows/windows-tests.yaml +++ b/.github/workflows/windows-tests.yaml @@ -16,6 +16,8 @@ jobs: python-version: - "3.9" - "3.10" + - "3.11" + - "3.12" fail-fast: false @@ -51,4 +53,4 @@ jobs: - name: Run tests run: | # Parallelize tests by scope to reduce expensive service fixture duplication - pytest tests -vv --numprocesses auto --dist loadscope --exclude-services --durations=25 + pytest tests -vv --numprocesses auto --dist loadscope --exclude-services --durations=25 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 9d8b3c498d98..8046decb00b4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,8 +20,8 @@ WORKDIR /opt/ui RUN apt-get update && \ apt-get install --no-install-recommends -y \ - # Required for arm64 builds - chromium \ + # Required for arm64 builds + chromium \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Install a newer npm to avoid esbuild errors @@ -46,8 +46,8 @@ WORKDIR /opt/prefect RUN apt-get update && \ apt-get install --no-install-recommends -y \ - gpg \ - git=1:2.* \ + gpg \ + git=1:2.* \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Copy the repository in; requires full git history for versions to generate correctly @@ -96,13 +96,13 @@ WORKDIR /opt/prefect # - git: Required for retrieving workflows from git sources RUN apt-get update && \ apt-get install --no-install-recommends -y \ - tini=0.19.* \ - build-essential \ - git=1:2.* \ + tini=0.19.* \ + build-essential \ + git=1:2.* \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Pin the pip version -RUN python -m pip install --no-cache-dir pip==22.3.1 +RUN python -m pip install --no-cache-dir pip==23.3.1 # Install the base requirements separately so they cache COPY requirements-client.txt requirements.txt ./ diff --git a/docs/concepts/flows.md b/docs/concepts/flows.md index 47f805dbb999..b1d3c706e093 100644 --- a/docs/concepts/flows.md +++ b/docs/concepts/flows.md @@ -160,7 +160,7 @@ def my_flow(name: str, date: datetime.datetime): pass # creates a flow run called 'marvin-on-Thursday' -my_flow(name="marvin", date=datetime.datetime.utcnow()) +my_flow(name="marvin", date=datetime.datetime.now(datetime.timezone.utc)) ``` Additionally this setting also accepts a function that returns a string for the flow run name: @@ -170,7 +170,7 @@ import datetime from prefect import flow def generate_flow_run_name(): - date = datetime.datetime.utcnow() + date = datetime.datetime.now(datetime.timezone.utc) return f"{date:%A}-is-a-nice-day" @@ -478,7 +478,7 @@ from datetime import datetime @flow def what_day_is_it(date: datetime = None): if date is None: - date = datetime.utcnow() + date = datetime.now(timezone.utc) print(f"It was {date.strftime('%A')} on {date.isoformat()}") what_day_is_it("2021-01-01T02:00:19.180906") @@ -906,7 +906,7 @@ if __name__ == "__main__": ## Pausing or suspending a flow run Prefect provides you with the ability to halt a flow run with two functions that are similar, but slightly different. -When a flow run is paused, code execution is stopped and the process continues to run. 
+When a flow run is paused, code execution is stopped and the process continues to run. When a flow run is suspended, code execution is stopped and so is the process. ### Pausing a flow run @@ -1081,6 +1081,7 @@ async def greet_user(): logger.info(f"Hello, {user_input.name}!") ``` + Running this flow will create a flow run. The flow run will advance until code execution reaches `pause_flow_run`, at which point it will move into a `Paused` state. Execution will block and wait for resumption. diff --git a/docs/concepts/tasks.md b/docs/concepts/tasks.md index 8923dff4e529..2b9f6d30b109 100644 --- a/docs/concepts/tasks.md +++ b/docs/concepts/tasks.md @@ -22,7 +22,7 @@ search: # Tasks -A task is a function that represents a discrete unit of work in a Prefect workflow. Tasks are not required — you may define Prefect workflows that consist only of flows, using regular Python statements and functions. Tasks enable you to encapsulate elements of your workflow logic in observable units that can be reused across flows and subflows. +A task is a function that represents a discrete unit of work in a Prefect workflow. Tasks are not required — you may define Prefect workflows that consist only of flows, using regular Python statements and functions. Tasks enable you to encapsulate elements of your workflow logic in observable units that can be reused across flows and subflows. ## Tasks overview @@ -30,7 +30,7 @@ Tasks are functions: they can take inputs, perform work, and return an output. A Tasks are special because they receive metadata about upstream dependencies and the state of those dependencies before they run, even if they don't receive any explicit data inputs from them. This gives you the opportunity to, for example, have a task wait on the completion of another task before executing. -Tasks also take advantage of automatic Prefect [logging](/concepts/logs/) to capture details about task runs such as runtime, tags, and final state. +Tasks also take advantage of automatic Prefect [logging](/concepts/logs/) to capture details about task runs such as runtime, tags, and final state. You can define your tasks within the same file as your flow definition, or you can define tasks within modules and import them for use in your flow definitions. All tasks must be called from within a flow. Tasks may not be called from other tasks. @@ -122,7 +122,7 @@ def my_task(name, date): @flow def my_flow(): # creates a run with a name like "hello-marvin-on-Thursday" - my_task(name="marvin", date=datetime.datetime.utcnow()) + my_task(name="marvin", date=datetime.datetime.now(datetime.timezone.utc)) ``` Additionally this setting also accepts a function that returns a string to be used for the task run name: @@ -132,7 +132,7 @@ import datetime from prefect import flow, task def generate_task_name(): - date = datetime.datetime.utcnow() + date = datetime.datetime.now(datetime.timezone.utc) return f"{date:%A}-is-a-lovely-day" @task(name="My Example Task", @@ -220,7 +220,6 @@ or _failed_ if it returned a string. !!! note "Retries don't create new task runs" A new task run is not created when a task is retried. A new state is added to the state history of the original task run. - ### A real-world example: making an API request Consider the real-world problem of making an API request. In this example, @@ -338,7 +337,7 @@ def some_task_with_exponential_backoff_retries(): ### Configuring retry behavior globally with settings -You can also set retries and retry delays by using the following global settings. 
These settings will not override the `retries` or `retry_delay_seconds` that are set in the flow or task decorator. +You can also set retries and retry delays by using the following global settings. These settings will not override the `retries` or `retry_delay_seconds` that are set in the flow or task decorator. ``` prefect config set PREFECT_FLOW_DEFAULT_RETRIES=2 @@ -377,12 +376,12 @@ def hello_flow(name_input): hello_task(name_input) ``` -Alternatively, you can provide your own function or other callable that returns a string cache key. A generic `cache_key_fn` is a function that accepts two positional arguments: +Alternatively, you can provide your own function or other callable that returns a string cache key. A generic `cache_key_fn` is a function that accepts two positional arguments: - The first argument corresponds to the `TaskRunContext`, which stores task run metadata in the attributes `task_run_id`, `flow_run_id`, and `task`. - The second argument corresponds to a dictionary of input values to the task. For example, if your task is defined with signature `fn(x, y, z)` then the dictionary will have keys `"x"`, `"y"`, and `"z"` with corresponding values that can be used to compute your cache key. -Note that the `cache_key_fn` is _not_ defined as a `@task`. +Note that the `cache_key_fn` is _not_ defined as a `@task`. !!! note "Task cache keys" By default, a task cache key is limited to 2000 characters, specified by the `PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH` setting. @@ -468,9 +467,9 @@ def caching_task(): ## Timeouts -Task timeouts are used to prevent unintentional long-running tasks. When the duration of execution for a task exceeds the duration specified in the timeout, a timeout exception will be raised and the task will be marked as failed. In the UI, the task will be visibly designated as `TimedOut`. From the perspective of the flow, the timed-out task will be treated like any other failed task. +Task timeouts are used to prevent unintentional long-running tasks. When the duration of execution for a task exceeds the duration specified in the timeout, a timeout exception will be raised and the task will be marked as failed. In the UI, the task will be visibly designated as `TimedOut`. From the perspective of the flow, the timed-out task will be treated like any other failed task. -Timeout durations are specified using the `timeout_seconds` keyword argument. +Timeout durations are specified using the `timeout_seconds` keyword argument. ```python from prefect import task, get_run_logger @@ -502,6 +501,7 @@ See [state returned values](/concepts/task-runners/#using-results-from-submitted If you just need the result from a task, you can simply call the task from your flow. For most workflows, the default behavior of calling a task directly and receiving a result is all you'll need. ## Wait for + To create a dependency between two tasks that do not exchange data, but one needs to wait for the other to finish, use the special [`wait_for`](/api-ref/prefect/tasks/#prefect.tasks.Task.submit) keyword argument: ```python @@ -618,7 +618,7 @@ Prefect has built-in functionality for achieving this: task concurrency limits. Task concurrency limits use [task tags](#tags). You can specify an optional concurrency limit as the maximum number of concurrent task runs in a `Running` state for tasks with a given tag. The specified concurrency limit applies to any task to which the tag is applied. -If a task has multiple tags, it will run only if _all_ tags have available concurrency. 
+If a task has multiple tags, it will run only if _all_ tags have available concurrency. Tags without explicit limits are considered to have unlimited concurrency. @@ -627,9 +627,9 @@ Tags without explicit limits are considered to have unlimited concurrency. ### Execution behavior -Task tag limits are checked whenever a task run attempts to enter a [`Running` state](/concepts/states/). +Task tag limits are checked whenever a task run attempts to enter a [`Running` state](/concepts/states/). -If there are no concurrency slots available for any one of your task's tags, the transition to a `Running` state will be delayed and the client is instructed to try entering a `Running` state again in 30 seconds (or the value specified by the `PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS` setting). +If there are no concurrency slots available for any one of your task's tags, the transition to a `Running` state will be delayed and the client is instructed to try entering a `Running` state again in 30 seconds (or the value specified by the `PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS` setting). !!! warning "Concurrency limits in subflows" Using concurrency limits on task runs in subflows can cause deadlocks. As a best practice, configure your tags and concurrency limits to avoid setting limits on task runs in subflows. @@ -639,7 +639,6 @@ If there are no concurrency slots available for any one of your task's tags, the !!! tip "Flow run concurrency limits are set at a work pool and/or work queue level" While task run concurrency limits are configured via tags (as shown below), [flow run concurrency limits](https://docs.prefect.io/latest/concepts/work-pools/#work-pool-concurrency) are configured via work pools and/or work queues. - You can set concurrency limits on as few or as many tags as you wish. You can set limits through: - Prefect [CLI](#cli) @@ -651,7 +650,7 @@ You can set concurrency limits on as few or as many tags as you wish. You can se You can create, list, and remove concurrency limits by using Prefect CLI `concurrency-limit` commands. ```bash -$ prefect concurrency-limit [command] [arguments] +prefect concurrency-limit [command] [arguments] ``` | Command | Description | @@ -664,24 +663,24 @@ $ prefect concurrency-limit [command] [arguments] For example, to set a concurrency limit of 10 on the 'small_instance' tag: ```bash -$ prefect concurrency-limit create small_instance 10 +prefect concurrency-limit create small_instance 10 ``` To delete the concurrency limit on the 'small_instance' tag: ```bash -$ prefect concurrency-limit delete small_instance +prefect concurrency-limit delete small_instance ``` To view details about the concurrency limit on the 'small_instance' tag: ```bash -$ prefect concurrency-limit inspect small_instance +prefect concurrency-limit inspect small_instance ``` #### Python client -To update your tag concurrency limits programmatically, use [`PrefectClient.orchestration.create_concurrency_limit`](../../api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.create_concurrency_limit). +To update your tag concurrency limits programmatically, use [`PrefectClient.orchestration.create_concurrency_limit`](../../api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.create_concurrency_limit). 
+To update your tag concurrency limits programmatically, use [`PrefectClient.orchestration.create_concurrency_limit`](../../api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.create_concurrency_limit).

`create_concurrency_limit` takes two arguments:

@@ -711,7 +710,6 @@ async with get_client() as client:

If you wish to query for the currently set limit on a tag, use [`PrefectClient.read_concurrency_limit_by_tag`](/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.read_concurrency_limit_by_tag), passing the tag:

-
To see _all_ of your limits across all of your tags, use [`PrefectClient.read_concurrency_limits`](/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.read_concurrency_limits).

```python
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
index 768394b7ae9a..30f67f7f5b6f 100644
--- a/docs/getting-started/installation.md
+++ b/docs/getting-started/installation.md
@@ -17,6 +17,8 @@ search:

Prefect requires Python 3.8 or newer.

+Python 3.12 support is experimental, as not all of Prefect's dependencies support it yet. If you encounter any errors, please [open an issue](https://github.com/PrefectHQ/prefect/issues/new?assignees=&labels=needs%3Atriage%2Cbug&projects=&template=1_general_bug_report.yaml).
+

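Because support is experimental, a project that wants to surface this caveat to users could warn when it detects 3.12 at import time. This is a hypothetical sketch; the patch itself adds no such warning:

```python
import sys
import warnings

if sys.version_info >= (3, 12):
    # Mirrors the caveat added to the installation docs above.
    warnings.warn(
        "Python 3.12 support is experimental; not all dependencies support it yet.",
        UserWarning,
        stacklevel=2,
    )
```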
@@ -104,6 +106,7 @@ Server type: ephemeral
Server:
  Database:          sqlite
  SQLite version:    3.42.0
+
```


@@ -123,11 +126,11 @@ If you're using Windows Subsystem for Linux (WSL), see [Linux installation notes

## Linux installation notes

-Linux is a popular operating system for running Prefect. You can use [Prefect Cloud](/ui/cloud/) as your API server, or [host your own Prefect server](/host/) backed by [PostgreSQL](/concepts/database/#configuring_a_postgresql_database). 
+Linux is a popular operating system for running Prefect. You can use [Prefect Cloud](/ui/cloud/) as your API server, or [host your own Prefect server](/host/) backed by [PostgreSQL](/concepts/database/#configuring_a_postgresql_database).

For development, you can use [SQLite](/concepts/database/#configuring_a_sqlite_database) 3.24 or newer as your database. Note that certain Linux versions of SQLite can be problematic; compatible distributions include Ubuntu 22.04 LTS and Ubuntu 20.04 LTS.

-Alternatively, you can [install SQLite on Red Hat Enterprise Linux (RHEL)](#install-sqlite-on-rhel) or use the `conda` virtual environment manager and configure a compatible SQLite version. 
+Alternatively, you can [install SQLite on Red Hat Enterprise Linux (RHEL)](#install-sqlite-on-rhel) or use the `conda` virtual environment manager and configure a compatible SQLite version.

## Using a self-signed SSL certificate

@@ -140,7 +143,6 @@ If the certificate is not part of your system bundle, you can set the

***Note:*** Disabling certificate validation is insecure and only suggested as an option for testing!

-
## Proxies

Prefect supports communicating via proxies through environment variables. Simply set `HTTPS_PROXY` and `SSL_CERT_FILE` in your environment, and the underlying network libraries will route Prefect’s requests appropriately. Read more about using Prefect Cloud with proxies [here](https://discourse.prefect.io/t/using-prefect-cloud-with-proxies/1696).

@@ -149,7 +151,7 @@ Prefect supports communicating via proxies through environment variables. Simply

### SQLite

-You can use [Prefect Cloud](/ui/cloud/) as your API server, or [host your own Prefect server](/host/) backed by [PostgreSQL](/concepts/database/#configuring_a_postgresql_database). 
+You can use [Prefect Cloud](/ui/cloud/) as your API server, or [host your own Prefect server](/host/) backed by [PostgreSQL](/concepts/database/#configuring_a_postgresql_database).

By default, a local Prefect server instance uses SQLite as the backing database. SQLite is not packaged with the Prefect installation. Most systems will already have SQLite installed, because it is typically bundled as a part of Python. Prefect requires SQLite version 3.24.0 or later.

@@ -236,4 +238,5 @@ For more information about these environment variables, see the
[cURL documentation](https://everything.curl.dev/usingcurl/proxies/env).

## Next steps
-Now that you have Prefect installed and your environment configured, you may want to check out the [Tutorial](/tutorial/) to get more familiar with Prefect. \ No newline at end of file
+
+Now that you have Prefect installed and your environment configured, you may want to check out the [Tutorial](/tutorial/) to get more familiar with Prefect. 
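The SQLite requirement stated above (3.24.0 or later) can be verified from the standard library, since Python reports the version of the SQLite library it is linked against. A quick sketch:

```python
import sqlite3

# sqlite3.sqlite_version is the linked SQLite library's version,
# not the version of Python's sqlite3 module.
print(sqlite3.sqlite_version)
assert tuple(map(int, sqlite3.sqlite_version.split("."))) >= (3, 24, 0), (
    "Prefect requires SQLite 3.24.0 or later"
)
```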
diff --git a/requirements-client.txt b/requirements-client.txt index 0af0bee4ead6..f3f292293b9a 100644 --- a/requirements-client.txt +++ b/requirements-client.txt @@ -15,7 +15,9 @@ jsonschema >= 3.2.0, < 5.0.0 orjson >= 3.7, < 4.0 packaging >= 21.3, < 24.3 pathspec >= 0.8.0 -pendulum >= 2.1.2, < 3.0.0 +# https://github.com/PrefectHQ/prefect/issues/11619 +pendulum < 3.0; python_version < '3.12' +pendulum >= 3.0.0, <4; python_version >= '3.12' # the version constraints for pydantic are merged with those from fastapi 0.103.2 pydantic[email]>=1.10.0,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0 python_dateutil >= 2.8.2, < 3.0.0 diff --git a/setup.cfg b/setup.cfg index 68d9639e1d1e..182672d747d6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -68,6 +68,9 @@ filterwarnings = ignore:Prefect will drop support for Python 3.7:FutureWarning ignore:`PREFECT_API_URL` uses `/account/` but should use `/accounts/`.:UserWarning ignore:`PREFECT_API_URL` should have `/api` after the base URL.:UserWarning + # datetime.datetime.utcnow() is deprecated as of Python 3.12, waiting on 3rd party fixes in boto3 https://github.com/boto/boto3/issues/3889 + ignore:datetime\.datetime\.utcnow\(\) is deprecated and scheduled for removal in a future version\..*:DeprecationWarning + ignore:datetime\.datetime\.utcfromtimestamp\(\) is deprecated and scheduled for removal in a future version\..*:DeprecationWarning [mypy] # TODO: We will allow definitions to be untyped for now; in the future we will want to diff --git a/setup.py b/setup.py index 492c71e8f6a9..e232846823cc 100644 --- a/setup.py +++ b/setup.py @@ -51,6 +51,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Software Development :: Libraries", ], ) diff --git a/src/prefect/cli/deployment.py b/src/prefect/cli/deployment.py index fca26949a91e..8ffe60aa5055 100644 --- a/src/prefect/cli/deployment.py +++ b/src/prefect/cli/deployment.py @@ -5,7 +5,7 @@ import sys import textwrap import warnings -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from pathlib import Path from typing import Any, Dict, List, Optional, Union @@ -574,7 +574,7 @@ async def run( "RETURN_AS_TIMEZONE_AWARE": False, "PREFER_DATES_FROM": "future", "RELATIVE_BASE": datetime.fromtimestamp( - now.timestamp(), tz=pendulum.tz.UTC + now.timestamp(), tz=timezone.utc ), }, ) diff --git a/src/prefect/client/schemas/schedules.py b/src/prefect/client/schemas/schedules.py index 607282452dde..ae0f03febb44 100644 --- a/src/prefect/client/schemas/schedules.py +++ b/src/prefect/client/schemas/schedules.py @@ -84,14 +84,20 @@ def default_anchor_date(cls, v): @validator("timezone", always=True) def default_timezone(cls, v, *, values, **kwargs): + # pendulum.tz.timezones is a callable in 3.0 and above + # https://github.com/PrefectHQ/prefect/issues/11619 + if callable(pendulum.tz.timezones): + timezones = pendulum.tz.timezones() + else: + timezones = pendulum.tz.timezones # if was provided, make sure its a valid IANA string - if v and v not in pendulum.tz.timezones: + if v and v not in timezones: raise ValueError(f'Invalid timezone: "{v}"') # otherwise infer the timezone from the anchor date elif v is None and values.get("anchor_date"): tz = values["anchor_date"].tz.name - if tz in pendulum.tz.timezones: + if tz in timezones: return tz # sometimes anchor dates have "timezones" that are UTC offsets # like "-04:00". This happens when parsing ISO8601 strings. 
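The `callable(pendulum.tz.timezones)` shim above reappears in each timezone validator touched by this patch: pendulum 2.x exposes `pendulum.tz.timezones` as a collection of IANA names, while 3.x makes it a function that returns them. A shared helper (hypothetical name, not how the patch is structured) would express the check once:

```python
import pendulum


def available_timezones() -> set:
    """Return IANA timezone names under both pendulum 2.x and 3.x."""
    tzs = pendulum.tz.timezones
    # In pendulum 3.x this attribute is a callable; in 2.x it is the data itself.
    return set(tzs() if callable(tzs) else tzs)
```

Inlining the check in each validator, as the patch does, keeps every hunk self-contained at the cost of some repetition.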
@@ -141,7 +147,14 @@ class Config: @validator("timezone") def valid_timezone(cls, v): - if v and v not in pendulum.tz.timezones: + # pendulum.tz.timezones is a callable in 3.0 and above + # https://github.com/PrefectHQ/prefect/issues/11619 + if callable(pendulum.tz.timezones): + timezones = pendulum.tz.timezones() + else: + timezones = pendulum.tz.timezones + + if v and v not in timezones: raise ValueError( f'Invalid timezone: "{v}" (specify in IANA tzdata format, for example,' " America/New_York)" diff --git a/src/prefect/server/database/orm_models.py b/src/prefect/server/database/orm_models.py index 66fb6b238208..2ae491f1f9e3 100644 --- a/src/prefect/server/database/orm_models.py +++ b/src/prefect/server/database/orm_models.py @@ -446,14 +446,14 @@ def estimated_start_time_delta(self) -> datetime.timedelta: have a start time and are not in a final state and were expected to start already.""" if self.start_time and self.start_time > self.expected_start_time: - return (self.start_time - self.expected_start_time).as_interval() + return self.start_time - self.expected_start_time elif ( self.start_time is None and self.expected_start_time and self.expected_start_time < pendulum.now("UTC") and self.state_type not in schemas.states.TERMINAL_STATES ): - return (pendulum.now("UTC") - self.expected_start_time).as_interval() + return pendulum.now("UTC") - self.expected_start_time else: return datetime.timedelta(0) diff --git a/src/prefect/server/schemas/schedules.py b/src/prefect/server/schemas/schedules.py index 2de430bb6507..b24e56b1baa8 100644 --- a/src/prefect/server/schemas/schedules.py +++ b/src/prefect/server/schemas/schedules.py @@ -65,10 +65,10 @@ class IntervalSchedule(PrefectBaseModel): continue to fire at 9am in the local time zone. Args: - interval (datetime.timedelta): an interval to schedule on + interval (datetime.timedelta): an interval to schedule on. anchor_date (DateTimeTZ, optional): an anchor date to schedule increments against; - if not provided, the current timestamp will be used - timezone (str, optional): a valid timezone string + if not provided, the current timestamp will be used. + timezone (str, optional): a valid timezone string. """ class Config: @@ -93,14 +93,21 @@ def default_anchor_date(cls, v): @validator("timezone", always=True) def default_timezone(cls, v, *, values, **kwargs): + # pendulum.tz.timezones is a callable in 3.0 and above + # https://github.com/PrefectHQ/prefect/issues/11619 + if callable(pendulum.tz.timezones): + timezones = pendulum.tz.timezones() + else: + timezones = pendulum.tz.timezones + # if was provided, make sure its a valid IANA string - if v and v not in pendulum.tz.timezones: + if v and v not in timezones: raise ValueError(f'Invalid timezone: "{v}"') # otherwise infer the timezone from the anchor date elif v is None and values.get("anchor_date"): tz = values["anchor_date"].tz.name - if tz in pendulum.tz.timezones: + if tz in timezones: return tz # sometimes anchor dates have "timezones" that are UTC offsets # like "-04:00". This happens when parsing ISO8601 strings. 
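Given the `IntervalSchedule` docstring above, a daily schedule anchored in a specific zone is constructed as follows. A sketch mirroring the test fixtures later in this patch, which now pass a plain `datetime.timedelta` instead of converting a pendulum duration:

```python
import datetime

import pendulum

from prefect.server.schemas.schedules import IntervalSchedule

schedule = IntervalSchedule(
    interval=datetime.timedelta(days=1),        # fire every 24 hours
    anchor_date=pendulum.datetime(2020, 1, 1),  # increments are computed from here
    timezone="America/New_York",                # must be a valid IANA name
)
```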
@@ -248,7 +255,14 @@ class Config: @validator("timezone") def valid_timezone(cls, v): - if v and v not in pendulum.tz.timezones: + # pendulum.tz.timezones is a callable in 3.0 and above + # https://github.com/PrefectHQ/prefect/issues/11619 + if callable(pendulum.tz.timezones): + timezones = pendulum.tz.timezones() + else: + timezones = pendulum.tz.timezones + + if v and v not in timezones: raise ValueError( f'Invalid timezone: "{v}" (specify in IANA tzdata format, for example,' " America/New_York)" diff --git a/tests/cli/test_start_server.py b/tests/cli/test_start_server.py index ce794f49a17b..9454b15a4ac4 100644 --- a/tests/cli/test_start_server.py +++ b/tests/cli/test_start_server.py @@ -1,3 +1,4 @@ +import contextlib import os import signal import sys @@ -15,9 +16,11 @@ STARTUP_TIMEOUT = 20 SHUTDOWN_TIMEOUT = 20 +pytestmark = pytest.mark.flaky(max_runs=4) -@pytest.fixture(scope="function") -async def server_process(): + +@contextlib.asynccontextmanager +async def start_server_process(): """ Runs an instance of the server. Requires a port from 2222-2229 to be available. Uses the same database as the rest of the tests. @@ -60,8 +63,8 @@ async def server_process(): # Wait for the server to be ready async with httpx.AsyncClient() as client: - response = None with anyio.move_on_after(STARTUP_TIMEOUT): + response = None while True: try: response = await client.get(api_url + "/health") @@ -90,96 +93,105 @@ class TestUvicornSignalForwarding: sys.platform == "win32", reason="SIGTERM is only used in non-Windows environments", ) - async def test_sigint_sends_sigterm(self, server_process): - server_process.send_signal(signal.SIGINT) - with anyio.fail_after(SHUTDOWN_TIMEOUT): - await server_process.wait() - server_process.out.seek(0) - out = server_process.out.read().decode() - - assert "Sending SIGTERM" in out, ( - "When sending a SIGINT, the main process should send a SIGTERM to the" - f" uvicorn subprocess. Output:\n{out}" - ) + async def test_sigint_sends_sigterm(self): + async with start_server_process() as server_process: + server_process.send_signal(signal.SIGINT) + with anyio.fail_after(SHUTDOWN_TIMEOUT): + await server_process.wait() + server_process.out.seek(0) + out = server_process.out.read().decode() + + assert "Sending SIGTERM" in out, ( + "When sending a SIGINT, the main process should send a SIGTERM to the" + f" uvicorn subprocess. Output:\n{out}" + ) @pytest.mark.skipif( sys.platform == "win32", reason="SIGTERM is only used in non-Windows environments", ) - async def test_sigterm_sends_sigterm_directly(self, server_process): - server_process.send_signal(signal.SIGTERM) - with anyio.fail_after(SHUTDOWN_TIMEOUT): - await server_process.wait() - server_process.out.seek(0) - out = server_process.out.read().decode() - - assert "Sending SIGTERM" in out, ( - "When sending a SIGTERM, the main process should send a SIGTERM to the" - f" uvicorn subprocess. Output:\n{out}" - ) + async def test_sigterm_sends_sigterm_directly(self): + async with start_server_process() as server_process: + server_process.send_signal(signal.SIGTERM) + with anyio.fail_after(SHUTDOWN_TIMEOUT): + await server_process.wait() + server_process.out.seek(0) + out = server_process.out.read().decode() + + assert "Sending SIGTERM" in out, ( + "When sending a SIGTERM, the main process should send a SIGTERM to the" + f" uvicorn subprocess. 
Output:\n{out}" + ) @pytest.mark.skipif( sys.platform == "win32", reason="SIGTERM is only used in non-Windows environments", ) - async def test_sigint_sends_sigterm_then_sigkill(self, server_process): - server_process.send_signal(signal.SIGINT) - await anyio.sleep(0.001) # some time needed for the recursive signal handler - server_process.send_signal(signal.SIGINT) - with anyio.fail_after(SHUTDOWN_TIMEOUT): - await server_process.wait() - server_process.out.seek(0) - out = server_process.out.read().decode() - - assert ( - # either the main PID is still waiting for shutdown, so forwards the SIGKILL - "Sending SIGKILL" in out - # or SIGKILL came too late, and the main PID is already closing - or "KeyboardInterrupt" in out - or "Server stopped!" in out - ), ( - "When sending two SIGINT shortly after each other, the main process should" - " first send a SIGTERM and then a SIGKILL to the uvicorn subprocess." - f" Output:\n{out}" - ) + async def test_sigint_sends_sigterm_then_sigkill(self): + async with start_server_process() as server_process: + server_process.send_signal(signal.SIGINT) + await anyio.sleep( + 0.001 + ) # some time needed for the recursive signal handler + server_process.send_signal(signal.SIGINT) + with anyio.fail_after(SHUTDOWN_TIMEOUT): + await server_process.wait() + server_process.out.seek(0) + out = server_process.out.read().decode() + + assert ( + # either the main PID is still waiting for shutdown, so forwards the SIGKILL + "Sending SIGKILL" in out + # or SIGKILL came too late, and the main PID is already closing + or "KeyboardInterrupt" in out + or "Server stopped!" in out + ), ( + "When sending two SIGINT shortly after each other, the main process" + " should first send a SIGTERM and then a SIGKILL to the uvicorn" + f" subprocess. Output:\n{out}" + ) @pytest.mark.skipif( sys.platform == "win32", reason="SIGTERM is only used in non-Windows environments", ) - async def test_sigterm_sends_sigterm_then_sigkill(self, server_process): - server_process.send_signal(signal.SIGTERM) - await anyio.sleep(0.001) # some time needed for the recursive signal handler - server_process.send_signal(signal.SIGTERM) - with anyio.fail_after(SHUTDOWN_TIMEOUT): - await server_process.wait() - server_process.out.seek(0) - out = server_process.out.read().decode() - - assert ( - # either the main PID is still waiting for shutdown, so forwards the SIGKILL - "Sending SIGKILL" in out - # or SIGKILL came too late, and the main PID is already closing - or "KeyboardInterrupt" in out - or "Server stopped!" in out - ), ( - "When sending two SIGTERM shortly after each other, the main process should" - " first send a SIGTERM and then a SIGKILL to the uvicorn subprocess." - f" Output:\n{out}" - ) + async def test_sigterm_sends_sigterm_then_sigkill(self): + async with start_server_process() as server_process: + server_process.send_signal(signal.SIGTERM) + await anyio.sleep( + 0.001 + ) # some time needed for the recursive signal handler + server_process.send_signal(signal.SIGTERM) + with anyio.fail_after(SHUTDOWN_TIMEOUT): + await server_process.wait() + server_process.out.seek(0) + out = server_process.out.read().decode() + + assert ( + # either the main PID is still waiting for shutdown, so forwards the SIGKILL + "Sending SIGKILL" in out + # or SIGKILL came too late, and the main PID is already closing + or "KeyboardInterrupt" in out + or "Server stopped!" 
in out + ), ( + "When sending two SIGTERM shortly after each other, the main process" + " should first send a SIGTERM and then a SIGKILL to the uvicorn" + f" subprocess. Output:\n{out}" + ) @pytest.mark.skipif( sys.platform != "win32", reason="CTRL_BREAK_EVENT is only defined in Windows", ) - async def test_sends_ctrl_break_win32(self, server_process): - server_process.send_signal(signal.SIGINT) - with anyio.fail_after(SHUTDOWN_TIMEOUT): - await server_process.wait() - server_process.out.seek(0) - out = server_process.out.read().decode() - - assert "Sending CTRL_BREAK_EVENT" in out, ( - "When sending a SIGINT, the main process should send a CTRL_BREAK_EVENT to" - f" the uvicorn subprocess. Output:\n{out}" - ) + async def test_sends_ctrl_break_win32(self): + async with start_server_process() as server_process: + server_process.send_signal(signal.SIGINT) + with anyio.fail_after(SHUTDOWN_TIMEOUT): + await server_process.wait() + server_process.out.seek(0) + out = server_process.out.read().decode() + + assert "Sending CTRL_BREAK_EVENT" in out, ( + "When sending a SIGINT, the main process should send a" + f" CTRL_BREAK_EVENT to the uvicorn subprocess. Output:\n{out}" + ) diff --git a/tests/client/test_prefect_client.py b/tests/client/test_prefect_client.py index d45af9aca2ba..775d8d96d391 100644 --- a/tests/client/test_prefect_client.py +++ b/tests/client/test_prefect_client.py @@ -411,7 +411,7 @@ async def enter_client(context): assert startup.call_count == shutdown.call_count assert startup.call_count > 0 - @pytest.mark.flaky(max_runs=3) + @pytest.mark.flaky(max_runs=5) async def test_client_context_lifespan_is_robust_to_high_async_concurrency(self): startup, shutdown = MagicMock(), MagicMock() app = FastAPI(lifespan=make_lifespan(startup, shutdown)) diff --git a/tests/fixtures/database.py b/tests/fixtures/database.py index b2708c53ebe4..d8f7e68f413a 100644 --- a/tests/fixtures/database.py +++ b/tests/fixtures/database.py @@ -979,7 +979,7 @@ def hello(name: str): tags=["test"], flow_id=flow.id, schedule=schemas.schedules.IntervalSchedule( - interval=pendulum.duration(days=1).as_timedelta(), + interval=datetime.timedelta(days=1), anchor_date=pendulum.datetime(2020, 1, 1), ), path="./subdir", @@ -1006,7 +1006,7 @@ def hello(name: str): tags=["test"], flow_id=flow.id, schedule=schemas.schedules.IntervalSchedule( - interval=pendulum.duration(days=1).as_timedelta(), + interval=datetime.timedelta(days=1), anchor_date=pendulum.datetime(2020, 1, 1), ), path="./subdir", @@ -1037,7 +1037,7 @@ def hello(name: str): tags=["test"], flow_id=flow.id, schedule=schemas.schedules.IntervalSchedule( - interval=pendulum.duration(days=1).as_timedelta(), + interval=datetime.timedelta(days=1), anchor_date=pendulum.datetime(2020, 1, 1), ), path="./subdir", diff --git a/tests/server/api/test_run_history.py b/tests/server/api/test_run_history.py index 155e24b5deb6..6d836895d013 100644 --- a/tests/server/api/test_run_history.py +++ b/tests/server/api/test_run_history.py @@ -2,6 +2,7 @@ from typing import List import pendulum +from packaging import version from prefect._internal.pydantic import HAS_PYDANTIC_V2 @@ -116,10 +117,20 @@ def create_task_run(task_run): f_1 = await create_flow(flow=core.Flow(name="f-1", tags=["db", "blue"])) await create_flow(flow=core.Flow(name="f-2", tags=["db"])) + # Pendulum renamed 'period' method to 'interval' in 3.0 + # and changed weeks to start on Mondays + # https://github.com/PrefectHQ/prefect/issues/11619 + if version.parse(pendulum.__version__) >= version.parse("3.0"): + 
pendulum_interval = pendulum.interval + weekend_days = (5, 6) + else: + weekend_days = (0, 6) + pendulum_interval = pendulum.period + # have a completed flow every 12 hours except weekends - for d in pendulum.period(dt.subtract(days=14), dt).range("hours", 12): + for d in pendulum_interval(dt.subtract(days=14), dt).range("hours", 12): # skip weekends - if d.day_of_week in (0, 6): + if d.day_of_week in weekend_days: continue await create_flow_run( @@ -132,7 +143,7 @@ def create_task_run(task_run): ) # have a failed flow every 36 hours except the last 3 days - for d in pendulum.period(dt.subtract(days=14), dt).range("hours", 36): + for d in pendulum_interval(dt.subtract(days=14), dt).range("hours", 36): # skip recent runs if dt.subtract(days=3) <= d < dt: continue @@ -146,7 +157,7 @@ def create_task_run(task_run): ) # a few running runs in the last two days - for d in pendulum.period(dt.subtract(days=2), dt).range("hours", 6): + for d in pendulum_interval(dt.subtract(days=2), dt).range("hours", 6): await create_flow_run( flow_run=core.FlowRun( flow_id=f_1.id, @@ -156,7 +167,9 @@ def create_task_run(task_run): ) # schedule new runs - for d in pendulum.period(dt.subtract(days=1), dt.add(days=3)).range("hours", 6): + for d in pendulum_interval(dt.subtract(days=1), dt.add(days=3)).range( + "hours", 6 + ): await create_flow_run( flow_run=core.FlowRun( flow_id=f_1.id, @@ -258,9 +271,10 @@ async def test_history_returns_maximum_items(client, route): # only first 500 items returned assert len(response.json()) == status.HTTP_500_INTERNAL_SERVER_ERROR - assert min([r["interval_start"] for r in response.json()]) == str(dt) - assert max([r["interval_start"] for r in response.json()]) == str( - dt.add(minutes=499) + assert min([r["interval_start"] for r in response.json()]) == dt.isoformat() + assert ( + max([r["interval_start"] for r in response.json()]) + == dt.add(minutes=499).isoformat() ) diff --git a/tests/server/schemas/test_schedules.py b/tests/server/schemas/test_schedules.py index 2d0a30ddcf30..67e0b5dbf5b1 100644 --- a/tests/server/schemas/test_schedules.py +++ b/tests/server/schemas/test_schedules.py @@ -7,6 +7,7 @@ import pendulum import pytest from dateutil import rrule +from packaging import version from pendulum import datetime, now from prefect._internal.pydantic import HAS_PYDANTIC_V2 @@ -454,8 +455,13 @@ async def test_cron_schedule_hourly_daylight_savings_time_backward(self): dates = await s.get_dates(n=5, start=dt) assert [d.in_tz("America/New_York").hour for d in dates] == [23, 0, 1, 2, 3] - # skips an hour UTC - note cron clocks skip the "5" - assert [d.in_tz("UTC").hour for d in dates] == [3, 4, 6, 7, 8] + + # pendulum fixed a UTC-offset issue in 3.0 + # https://github.com/PrefectHQ/prefect/issues/11619 + if version.parse(pendulum.__version__) >= version.parse("3.0"): + assert [d.in_tz("UTC").hour for d in dates] == [3, 4, 5, 7, 8] + else: + assert [d.in_tz("UTC").hour for d in dates] == [3, 4, 6, 7, 8] async def test_cron_schedule_daily_start_daylight_savings_time_forward(self): """ diff --git a/tests/server/utilities/test_schemas.py b/tests/server/utilities/test_schemas.py index 0c6b8c18d659..8018adfa367c 100644 --- a/tests/server/utilities/test_schemas.py +++ b/tests/server/utilities/test_schemas.py @@ -151,7 +151,7 @@ def test_json_compatible(self): assert isinstance(d2["x"], str) and d2["x"] == str(model.x) assert isinstance(d1["y"], datetime.datetime) and d1["y"] == model.y - assert isinstance(d2["y"], str) and d2["y"] == str(model.y) + assert isinstance(d2["y"], str) and 
d2["y"] == model.y.isoformat() def test_json_applies_to_nested(self, nested): d1 = nested.dict(json_compatible=True) diff --git a/tests/workers/test_base_worker.py b/tests/workers/test_base_worker.py index 653c245d303a..d3505b764d22 100644 --- a/tests/workers/test_base_worker.py +++ b/tests/workers/test_base_worker.py @@ -4,6 +4,7 @@ import anyio import pendulum +from packaging import version from prefect._internal.pydantic import HAS_PYDANTIC_V2 @@ -1965,55 +1966,105 @@ async def test_worker_set_last_polled_time( work_pool, ): now = pendulum.now("utc") - # https://github.com/sdispater/pendulum/blob/master/docs/docs/testing.md - pendulum.set_test_now(now) - async with WorkerTestImpl(work_pool_name=work_pool.name) as worker: - # initially, the worker should have _last_polled_time set to now - assert worker._last_polled_time == now + # https://github.com/PrefectHQ/prefect/issues/11619 + # Pendulum 3 Test Case + if version.parse(pendulum.__version__) >= version.parse("3.0"): + # https://github.com/sdispater/pendulum/blob/master/docs/docs/testing.md + with pendulum.travel_to(now, freeze=True): + async with WorkerTestImpl(work_pool_name=work_pool.name) as worker: + # initially, the worker should have _last_polled_time set to now + assert worker._last_polled_time == now + + # some arbitrary delta forward + now2 = now.add(seconds=49) + with pendulum.travel_to(now2, freeze=True): + await worker.get_and_submit_flow_runs() + assert worker._last_polled_time == now2 + + # some arbitrary datetime + now3 = pendulum.datetime(2021, 1, 1, 0, 0, 0, tz="utc") + with pendulum.travel_to(now3, freeze=True): + await worker.get_and_submit_flow_runs() + assert worker._last_polled_time == now3 + + # Pendulum 2 Test Case + else: + pendulum.set_test_now(now) + async with WorkerTestImpl(work_pool_name=work_pool.name) as worker: + # initially, the worker should have _last_polled_time set to now + assert worker._last_polled_time == now - # some arbitrary delta forward - now2 = now.add(seconds=49) - pendulum.set_test_now(now2) - await worker.get_and_submit_flow_runs() - assert worker._last_polled_time == now2 + # some arbitrary delta forward + now2 = now.add(seconds=49) + pendulum.set_test_now(now2) + await worker.get_and_submit_flow_runs() + assert worker._last_polled_time == now2 - # some arbitrary datetime - now3 = pendulum.datetime(2021, 1, 1, 0, 0, 0, tz="utc") - pendulum.set_test_now(now3) - await worker.get_and_submit_flow_runs() - assert worker._last_polled_time == now3 + # some arbitrary datetime + now3 = pendulum.datetime(2021, 1, 1, 0, 0, 0, tz="utc") + pendulum.set_test_now(now3) + await worker.get_and_submit_flow_runs() + assert worker._last_polled_time == now3 - # cleanup mock - pendulum.set_test_now() + # cleanup mock + pendulum.set_test_now() async def test_worker_last_polled_health_check( work_pool, ): now = pendulum.now("utc") - # https://github.com/sdispater/pendulum/blob/master/docs/docs/testing.md - pendulum.set_test_now(now) - async with WorkerTestImpl(work_pool_name=work_pool.name) as worker: - resp = worker.is_worker_still_polling(query_interval_seconds=10) - assert resp is True + # https://github.com/PrefectHQ/prefect/issues/11619 + # Pendulum 3 Test Case + if version.parse(pendulum.__version__) >= version.parse("3.0"): + # https://github.com/sdispater/pendulum/blob/master/docs/docs/testing.md + pendulum.travel_to(now, freeze=True) + + async with WorkerTestImpl(work_pool_name=work_pool.name) as worker: + resp = worker.is_worker_still_polling(query_interval_seconds=10) + assert resp is True + + 
with pendulum.travel(seconds=299): + resp = worker.is_worker_still_polling(query_interval_seconds=10) + assert resp is True + + with pendulum.travel(seconds=301): + resp = worker.is_worker_still_polling(query_interval_seconds=10) + assert resp is False + + with pendulum.travel(minutes=30): + resp = worker.is_worker_still_polling(query_interval_seconds=60) + assert resp is True + + with pendulum.travel(minutes=30, seconds=1): + resp = worker.is_worker_still_polling(query_interval_seconds=60) + assert resp is False + + # Pendulum 2 Test Case + else: + pendulum.set_test_now(now) + + async with WorkerTestImpl(work_pool_name=work_pool.name) as worker: + resp = worker.is_worker_still_polling(query_interval_seconds=10) + assert resp is True - pendulum.set_test_now(now.add(seconds=299)) - resp = worker.is_worker_still_polling(query_interval_seconds=10) - assert resp is True + pendulum.set_test_now(now.add(seconds=299)) + resp = worker.is_worker_still_polling(query_interval_seconds=10) + assert resp is True - pendulum.set_test_now(now.add(seconds=301)) - resp = worker.is_worker_still_polling(query_interval_seconds=10) - assert resp is False + pendulum.set_test_now(now.add(seconds=301)) + resp = worker.is_worker_still_polling(query_interval_seconds=10) + assert resp is False - pendulum.set_test_now(now.add(minutes=30)) - resp = worker.is_worker_still_polling(query_interval_seconds=60) - assert resp is True + pendulum.set_test_now(now.add(minutes=30)) + resp = worker.is_worker_still_polling(query_interval_seconds=60) + assert resp is True - pendulum.set_test_now(now.add(minutes=30, seconds=1)) - resp = worker.is_worker_still_polling(query_interval_seconds=60) - assert resp is False + pendulum.set_test_now(now.add(minutes=30, seconds=1)) + resp = worker.is_worker_still_polling(query_interval_seconds=60) + assert resp is False - # cleanup mock - pendulum.set_test_now() + # cleanup mock + pendulum.set_test_now() diff --git a/versioneer.py b/versioneer.py index ac6490806e5a..6fce0a8cb653 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,5 @@ # Version: 0.20 +# versioneer is on 0.29, would be good to update """The Versioneer - like a rocketeer, but for versions. @@ -1719,7 +1720,7 @@ def get_version(): def get_cmdclass(cmdclass=None): - """Get the custom setuptools/distutils subclasses used by Versioneer. + """Get the custom setuptools subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provide as an argument. @@ -1741,8 +1742,7 @@ def get_cmdclass(cmdclass=None): cmds = {} if cmdclass is None else cmdclass.copy() - # we add "version" to both distutils and setuptools - from distutils.core import Command + from setuptools import Command class cmd_version(Command): description = "report generated version string" @@ -1786,8 +1786,6 @@ def run(self): _build_py = cmds["build_py"] elif "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py - else: - from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): @@ -1808,8 +1806,6 @@ def run(self): _build_ext = cmds["build_ext"] elif "setuptools" in sys.modules: from setuptools.command.build_ext import build_ext as _build_ext - else: - from distutils.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): def run(self): @@ -1869,7 +1865,7 @@ def run(self): del cmds["build_py"] if "py2exe" in sys.modules: # py2exe enabled? 
- from py2exe.distutils_buildexe import py2exe as _py2exe + from py2exe.setuptools_buildexe import py2exe as _py2exe class cmd_py2exe(_py2exe): def run(self): @@ -1902,8 +1898,6 @@ def run(self): _sdist = cmds["sdist"] elif "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist - else: - from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self):
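All of these versioneer changes follow from PEP 632: `distutils` was removed from the standard library in Python 3.12, so the `distutils` fallback imports can no longer succeed there, leaving `setuptools` as the sole provider of `Command` and the build command classes. A minimal sketch of the behavior motivating them:

```python
import sys

from setuptools import Command  # replacement for distutils.core.Command

if sys.version_info >= (3, 12):
    # distutils is gone from the standard library in 3.12 (PEP 632).
    assert "distutils" not in sys.stdlib_module_names
else:
    import distutils  # still importable, though deprecated, through Python 3.11
```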