diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index ba75c3090f..82cb8b43ec 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -20,6 +20,7 @@ on: branches: - 'pull-request/**' - 'branch-*' + - 'main' # This allows a subsequently queued workflow run to interrupt previous runs concurrency: diff --git a/CHANGELOG.md b/CHANGELOG.md index 016ac721ad..4160330f51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,19 @@ See the License for the specific language governing permissions and limitations under the License. --> +# Morpheus 24.10.01 (22 Nov 2024) + +## 🐛 Bug Fixes + +- Pin mlflow version to avoid breaking changes in v2.18 ([#2067](https://github.com/nv-morpheus/Morpheus/pull/2067)) [@dagardner-nv](https://github.com/dagardner-nv) +- Execute CI on the main branch ([#2064](https://github.com/nv-morpheus/Morpheus/pull/2064)) [@dagardner-nv](https://github.com/dagardner-nv) + +## 📖 Documentation + +- Remove references to pipeline-ae in docs ([#2063](https://github.com/nv-morpheus/Morpheus/pull/2063)) [@dagardner-nv](https://github.com/dagardner-nv) +- Document location of third party source repository ([#2059](https://github.com/nv-morpheus/Morpheus/pull/2059)) [@dagardner-nv](https://github.com/dagardner-nv) +- Update DFP class and file paths ([#2052](https://github.com/nv-morpheus/Morpheus/pull/2052)) [@dagardner-nv](https://github.com/dagardner-nv) + # Morpheus 24.10.00 (01 Nov 2024) ## 🚨 Breaking Changes diff --git a/ci/conda/recipes/morpheus-libs/meta.yaml b/ci/conda/recipes/morpheus-libs/meta.yaml index 747f734371..f6d88716bf 100644 --- a/ci/conda/recipes/morpheus-libs/meta.yaml +++ b/ci/conda/recipes/morpheus-libs/meta.yaml @@ -91,7 +91,7 @@ outputs: - feedparser =6.0.* - grpcio =1.62.* - lxml - - mlflow>=2.10.0,<3 + - mlflow>=2.10.0,<2.18 - mrc - networkx=2.8.8 - numpydoc =1.5.* @@ -239,7 +239,7 @@ outputs: - scripts/fetch_data.py - tests/* script: morpheus_llm_test.sh - + about: home: https://github.com/nv-morpheus/Morpheus license: Apache-2.0 diff --git a/ci/conda/recipes/morpheus/meta.yaml b/ci/conda/recipes/morpheus/meta.yaml index 23327bcbc7..fd60e49243 100644 --- a/ci/conda/recipes/morpheus/meta.yaml +++ b/ci/conda/recipes/morpheus/meta.yaml @@ -97,7 +97,7 @@ outputs: - feedparser =6.0.* - grpcio =1.62.* - libwebp>=1.3.2 # Required for CVE mitigation: https://nvd.nist.gov/vuln/detail/CVE-2023-4863 - - mlflow>=2.10.0,<3 + - mlflow>=2.10.0,<2.18 - mrc - networkx=2.8.8 - numpydoc =1.5.* diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index 534f26b519..0761a09c9a 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -113,6 +113,9 @@ sed_runner "s/${CURRENT_SHORT_TAG}/${NEXT_SHORT_TAG}/g" docs/source/getting_star sed_runner "s|blob/branch-${CURRENT_SHORT_TAG}|blob/branch-${NEXT_SHORT_TAG}|g" models/model-cards/*.md sed_runner "s|tree/branch-${CURRENT_SHORT_TAG}|tree/branch-${NEXT_SHORT_TAG}|g" models/model-cards/*.md +# thirdparty +sed_runner "s|tree/branch-${CURRENT_SHORT_TAG}|tree/branch-${NEXT_SHORT_TAG}|g" thirdparty/README.md + # Update the version of the Morpheus model container # We need to update several files, however we need to avoid symlinks as well as the build and .cache directories DOCS_MD_FILES=$(find -P ./docs/source/ -type f -iname "*.md") diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index b913be445b..6f4c5c12e9 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ 
b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -66,7 +66,7 @@ dependencies: - libtool - libwebp=1.3.2 - libzlib >=1.3.1,<2 -- mlflow +- mlflow>=2.10.0,<2.18 - mrc=24.10 - myst-parser=0.18.1 - nbsphinx diff --git a/conda/environments/dev_cuda-125_arch-x86_64.yaml b/conda/environments/dev_cuda-125_arch-x86_64.yaml index f27becb108..a4ff228a9c 100644 --- a/conda/environments/dev_cuda-125_arch-x86_64.yaml +++ b/conda/environments/dev_cuda-125_arch-x86_64.yaml @@ -56,7 +56,7 @@ dependencies: - libtool - libwebp=1.3.2 - libzlib >=1.3.1,<2 -- mlflow +- mlflow>=2.10.0,<2.18 - mrc=24.10 - myst-parser=0.18.1 - nbsphinx diff --git a/conda/environments/examples_cuda-125_arch-x86_64.yaml b/conda/environments/examples_cuda-125_arch-x86_64.yaml index 14ba7e9c8c..0499d1690f 100644 --- a/conda/environments/examples_cuda-125_arch-x86_64.yaml +++ b/conda/environments/examples_cuda-125_arch-x86_64.yaml @@ -29,7 +29,7 @@ dependencies: - jsonpatch>=1.33 - kfp - libwebp=1.3.2 -- mlflow +- mlflow>=2.10.0,<2.18 - mrc=24.10 - networkx=2.8.8 - newspaper3k=0.2 diff --git a/conda/environments/runtime_cuda-125_arch-x86_64.yaml b/conda/environments/runtime_cuda-125_arch-x86_64.yaml index 2551739061..edfa5b103e 100644 --- a/conda/environments/runtime_cuda-125_arch-x86_64.yaml +++ b/conda/environments/runtime_cuda-125_arch-x86_64.yaml @@ -26,7 +26,7 @@ dependencies: - grpcio - grpcio-status - libwebp=1.3.2 -- mlflow +- mlflow>=2.10.0,<2.18 - mrc=24.10 - networkx=2.8.8 - numpydoc=1.5 diff --git a/dependencies.yaml b/dependencies.yaml index 8de432eb24..43484fde9a 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -376,7 +376,7 @@ dependencies: - grpcio - grpcio-status # - libwebp=1.3.2 # Required for CVE mitigation: https://nvd.nist.gov/vuln/detail/CVE-2023-4863 ## - - mlflow #>=2.10.0,<3 + - mlflow>=2.10.0,<2.18 # Pin version to avoid a breaking change to thread-local variable handling introduced in mlflow 2.18 (commit id 5541888) - mrc=24.10 - networkx=2.8.8 - numpydoc=1.5 diff --git a/docker/Dockerfile b/docker/Dockerfile index 5fa06726de..929d4fd005 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -363,6 +363,7 @@ COPY --from=git_clone "/tmp/morpheus_repo/examples" "./examples" COPY --from=git_clone "/tmp/morpheus_repo/scripts" "./scripts" COPY --from=git_clone "/tmp/morpheus_repo/*.md" "./" COPY --from=git_clone "/tmp/morpheus_repo/LICENSE" "./" +COPY --from=git_clone "/tmp/morpheus_repo/thirdparty" "./thirdparty" RUN /opt/conda/bin/conda clean -afy && \ # Ensure the conda-bld directory is indexed even if empty diff --git a/docs/source/developer_guide/guides/10_modular_pipeline_digital_fingerprinting.md b/docs/source/developer_guide/guides/10_modular_pipeline_digital_fingerprinting.md index d55f94b86e..83a7e564e3 100644 --- a/docs/source/developer_guide/guides/10_modular_pipeline_digital_fingerprinting.md +++ b/docs/source/developer_guide/guides/10_modular_pipeline_digital_fingerprinting.md @@ -71,7 +71,7 @@ The front-end loader outputs one or more control messages that are passed to the Moreover, the updated pipeline supports human-in-the-loop workflows, such as the ability to manually trigger training or inference tasks against a specific set of data, and the capacity for real-time labeling of production inference events that can be injected back into the training pipeline.
-The following content will track the pipeline declared in `examples/digital_fingerprinting/production/morpheus/dfp_integrated_training_streaming_pipeline.py` +The following content will track the pipeline declared in `examples/digital_fingerprinting/production/dfp_integrated_training_streaming_pipeline.py` ```python # Setup and command line argument parsing @@ -115,7 +115,7 @@ For a full introduction to Morpheus modules, refer to the [Python Modules](7_pyt ## DFP Deployment -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_deployment.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_deployment.py` This is the top level module that encapsulates the entire Digital Fingerprinting pipeline, it is primarily responsible for wrapping the training and inference pipelines, providing the correct module interface, and doing some configuration pre-processing. Since this module is monolithic, it supports a significant number of configuration options; however, the majority of these have intelligent defaults and are not required to be specified. @@ -162,7 +162,7 @@ There are a number of modules that are used in both the training and inference p ### DFP Preprocessing -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_preproc.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_preproc.py` The `dfp_preproc` module is a functional component within the Morpheus framework that combines multiple data filtering and processing pipeline modules related to inference and training. This module simplifies the pipeline by consolidating various modules into a single, cohesive unit. The `dfp_preproc` module supports configuration parameters such as the cache directory, timestamp column name, pre-filter options, batching options, user splitting options, and supported data loaders for various file types. @@ -233,7 +233,7 @@ For a complete reference, refer to: [DataLoader Module](../../modules/core/data_ ### DFP Split Users -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_split_users.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_split_users.py` The `dfp_split_users` module is responsible for splitting the input data based on user IDs. The module provides configuration options, such as fallback username, include generic user, include individual users, and specify lists of user IDs to include or exclude in the output. @@ -250,7 +250,7 @@ def dfp_split_users(builder: mrc.Builder): ### DFP Rolling Window -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_rolling_window.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_rolling_window.py` The `dfp_rolling_window` module is responsible for maintaining a rolling window of historical data, acting as a streaming caching and batching system. The module provides various configuration options, such as aggregation span, cache directory, caching options, timestamp column name, and trigger conditions. @@ -271,7 +271,7 @@ def dfp_rolling_window(builder: mrc.Builder): ### DFP Data Prep -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_data_prep.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_data_prep.py` The `dfp_data_prep` module is responsible for preparing data for either inference or model training. The module requires a defined schema for data preparation. 
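For readers unfamiliar with the registration pattern behind the `def dfp_split_users(builder: mrc.Builder)` and `def dfp_data_prep(builder: mrc.Builder)` signatures above, a minimal sketch of a Morpheus module follows. The module id `my_data_prep`, namespace `my_namespace`, and config key `column_name` are illustrative placeholders, not names defined by Morpheus:

```python
# Minimal sketch of the module pattern the DFP modules above follow.
# The module id, namespace, and config key are hypothetical examples.
import mrc
import mrc.core.operators as ops

from morpheus.utils.module_utils import register_module


@register_module("my_data_prep", "my_namespace")
def my_data_prep(builder: mrc.Builder):
    # Configuration supplied by the caller is retrieved from the builder.
    config = builder.get_current_module_config()
    column_name = config.get("column_name", "timestamp")

    def on_data(msg):
        # A real module would transform the payload here, for example by
        # applying its input schema to the column named by `column_name`.
        return msg

    node = builder.make_node("my_data_prep", ops.map(on_data))

    # Expose the node as the module's single input and output port.
    builder.register_module_input("input", node)
    builder.register_module_output("output", node)
```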
@@ -288,7 +288,7 @@ def dfp_data_prep(builder: mrc.Builder): ## DFP Training Pipeline -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_training_pipe.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_training_pipe.py` The DFP Training Pipe module is a consolidated module that integrates several DFP pipeline modules that are essential to the training process. This module function provides a single entry point to the training pipeline, simplifying the process of training a model. The module offers configurable parameters for various stages in the pipeline, including data batching, data preprocessing, and data encoding for model training. Additionally, the MLflow model writer options allow for the trained model to be saved for future use. @@ -326,7 +326,7 @@ def dfp_training_pipe(builder: mrc.Builder): ### DFP Training -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_training.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_training.py` The `dfp_training` module function is responsible for training the model. The `on_data` function is defined to handle incoming `ControlMessage` instances. It retrieves the user ID and the input data from the `ControlMessage`, creates an instance of the `AutoEncoder` class with the specified `model_kwargs`, and trains the model on the input data. The output message includes the trained model and metadata. @@ -358,7 +358,7 @@ def mlflow_model_writer(builder: mrc.Builder): ## DFP Inference Pipeline -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_inference_pipe.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_inference_pipe.py` The `dfp_inference_pipe` module function consolidates multiple digital fingerprinting pipeline (DFP) modules relevant to the inference process into a single module. Its purpose is to simplify the creation and configuration of an inference pipeline by combining all necessary components. @@ -407,7 +407,7 @@ def dfp_inference_pipe(builder: mrc.Builder): ### DFP Inference -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_inference.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_inference.py` The `dfp_inference` module function creates an inference module that retrieves trained models and performs inference on the input data. The module requires a `model_name_formatter` and a `fallback_username` to be configured in its parameters. @@ -443,7 +443,7 @@ For a complete reference, refer to: [Filter Detections](../../modules/core/filte ### DFP Post Processing -Source: `examples/digital_fingerprinting/production/morpheus/dfp/modules/dfp_postprocessing.py` +Source: `python/morpheus_dfp/morpheus_dfp/modules/dfp_postprocessing.py` The `dfp_postprocessing` module function performs post-processing tasks on the input data. 
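The modules documented above are typically wired into a pipeline through a module-loading stage. Below is a hedged sketch using `LinearModulesStage`, assuming a module registered under the hypothetical id `my_data_prep` in namespace `my_namespace`; the `module_id`/`module_name`/`namespace` keys are the standard module-config triplet, and `column_name` stands in for a module-specific option:

```python
# Sketch of loading a registered module into a pipeline via LinearModulesStage.
# The module id, namespace, and "column_name" option are hypothetical.
from morpheus.config import Config
from morpheus.pipeline.linear_pipeline import LinearPipeline
from morpheus.stages.general.linear_modules_stage import LinearModulesStage

config = Config()
pipeline = LinearPipeline(config)
# ... a source stage must be set on the pipeline before adding stages ...

module_config = {
    "module_id": "my_data_prep",    # id used when the module was registered
    "module_name": "my_data_prep",  # instance name within this pipeline
    "namespace": "my_namespace",
    "column_name": "timestamp",     # module-specific option
}

pipeline.add_stage(
    LinearModulesStage(config,
                       module_config,
                       input_port_name="input",
                       output_port_name="output"))
```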
diff --git a/docs/source/developer_guide/guides/6_digital_fingerprinting_reference.md b/docs/source/developer_guide/guides/6_digital_fingerprinting_reference.md index d60f64f19e..2bba0ded89 100644 --- a/docs/source/developer_guide/guides/6_digital_fingerprinting_reference.md +++ b/docs/source/developer_guide/guides/6_digital_fingerprinting_reference.md @@ -174,7 +174,7 @@ Subclass of `DateTimeColumn`, counts the unique occurrences of a value in `group ![Input Stages](img/dfp_input_config.png) #### Source Stage (`MultiFileSource`) -The `MultiFileSource` (`examples/digital_fingerprinting/production/morpheus/dfp/stages/multi_file_source.py`) receives a path or list of paths (`filenames`), and will collectively be emitted into the pipeline as an [`fsspec.core.OpenFiles`](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.core.OpenFiles) object. The paths may include wildcards `*` as well as URLs (ex: `s3://path`) to remote storage providers such as S3, FTP, GCP, Azure, Databricks and others as defined by [`fsspec`](https://filesystem-spec.readthedocs.io/en/latest/api.html?highlight=open_files#fsspec.open_files). In addition to this paths can be cached locally by prefixing them with `filecache::` (ex: `filecache::s3://bucket-name/key-name`). +The `MultiFileSource` (`python/morpheus/morpheus/modules/input/multi_file_source.py`) receives a path or list of paths (`filenames`), which are collectively emitted into the pipeline as an [`fsspec.core.OpenFiles`](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.core.OpenFiles) object. The paths may include wildcards `*` as well as URLs (ex: `s3://path`) to remote storage providers such as S3, FTP, GCP, Azure, Databricks and others as defined by [`fsspec`](https://filesystem-spec.readthedocs.io/en/latest/api.html?highlight=open_files#fsspec.open_files). In addition, paths can be cached locally by prefixing them with `filecache::` (ex: `filecache::s3://bucket-name/key-name`). > **Note:** This stage does not actually download the data files, allowing the file list to be filtered and batched prior to being downloaded. @@ -187,7 +187,7 @@ The `MultiFileSource` (`examples/digital_fingerprinting/production/morpheus/dfp/ #### File Batcher Stage (`DFPFileBatcherStage`) -The `DFPFileBatcherStage` (`examples/digital_fingerprinting/production/morpheus/dfp/stages/dfp_file_batcher_stage.py`) groups data in the incoming `DataFrame` in batches of a time period (per day default), and optionally filtering incoming data to a specific time window. This stage can potentially improve performance by combining multiple small files into a single batch. This stage assumes that the date of the logs can be easily inferred such as encoding the creation time in the file name (for example, `AUTH_LOG-2022-08-21T22.05.23Z.json`), or using the modification time as reported by the file system. The actual method for extracting the date is encoded in a user-supplied `date_conversion_func` function (more on this later). +The `DFPFileBatcherStage` (`python/morpheus_dfp/morpheus_dfp/stages/dfp_file_batcher_stage.py`) groups data in the incoming `DataFrame` into batches of a time period (one day by default), and optionally filters incoming data to a specific time window. This stage can potentially improve performance by combining multiple small files into a single batch.
This stage assumes that the date of the logs can be easily inferred, such as by encoding the creation time in the file name (for example, `AUTH_LOG-2022-08-21T22.05.23Z.json`), or by using the modification time as reported by the file system. The actual method for extracting the date is encoded in a user-supplied `date_conversion_func` function (more on this later). | Argument | Type | Description | | -------- | ---- | ----------- | @@ -219,7 +219,7 @@ pipeline.add_stage( > **Note:** If `date_conversion_func` returns time-zone aware timestamps, then `start_time` and `end_time` if not `None` need to also be timezone aware `datetime` objects. #### File to DataFrame Stage (`DFPFileToDataFrameStage`) -The `DFPFileToDataFrameStage` (`examples/digital_fingerprinting/production/morpheus/dfp/stages/dfp_file_to_df.py`) stage receives a `list` of an [`fsspec.core.OpenFiles`](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.core.OpenFiles) and loads them into a single `DataFrame` which is then emitted into the pipeline. When the parent stage is `DFPFileBatcherStage` each batch (typically one day) is concatenated into a single `DataFrame`. If the parent was `MultiFileSource` the entire dataset is loaded into a single `DataFrame`. Because of this, it is important to choose a `period` argument for `DFPFileBatcherStage` small enough such that each batch can fit into memory. +The `DFPFileToDataFrameStage` (`python/morpheus_dfp/morpheus_dfp/stages/dfp_file_to_df.py`) stage receives a `list` of [`fsspec.core.OpenFiles`](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.core.OpenFiles) and loads them into a single `DataFrame`, which is then emitted into the pipeline. When the parent stage is `DFPFileBatcherStage`, each batch (typically one day) is concatenated into a single `DataFrame`. If the parent was `MultiFileSource`, the entire dataset is loaded into a single `DataFrame`. Because of this, it is important to choose a `period` argument for `DFPFileBatcherStage` small enough such that each batch can fit into memory. | Argument | Type | Description | | -------- | ---- | ----------- | @@ -251,7 +251,7 @@ This final stage will write all received messages to a single output file in eit | `overwrite` | `bool` | Optional, defaults to `False`. If the file specified in `filename` already exists, it will be overwritten if this option is set to `True` | #### Write to S3 Stage (`WriteToS3Stage`) -The {py:obj}`~dfp.stages.write_to_s3_stage.WriteToS3Stage` stage writes the resulting anomaly detections to S3. The `WriteToS3Stage` decouples the S3 specific operations from the Morpheus stage, and as such receives an `s3_writer` argument. +The {py:obj}`~morpheus_dfp.stages.write_to_s3_stage.WriteToS3Stage` stage writes the resulting anomaly detections to S3. The `WriteToS3Stage` decouples the S3-specific operations from the Morpheus stage, and as such receives an `s3_writer` argument. | Argument | Type | Description | | -------- | ---- | ----------- | @@ -262,7 +262,7 @@ These stages are common to both the training and inference pipelines, unlike the input and output stages these are specific to the DFP pipeline and intended to be configured but not replaceable.
#### Split Users Stage (`DFPSplitUsersStage`) -The {py:obj}`~dfp.stages.dfp_split_users_stage.DFPSplitUsersStage` stage receives an incoming `DataFrame` and emits a `list` of `DFPMessageMeta` where each `DFPMessageMeta` represents the records associated for a given user. This allows for downstream stages to perform all necessary operations on a per user basis. +The {py:obj}`~morpheus_dfp.stages.dfp_split_users_stage.DFPSplitUsersStage` stage receives an incoming `DataFrame` and emits a `list` of `DFPMessageMeta` where each `DFPMessageMeta` represents the records associated with a given user. This allows downstream stages to perform all necessary operations on a per-user basis. | Argument | Type | Description | | -------- | ---- | ----------- | @@ -273,7 +273,7 @@ The {py:obj}`~dfp.stages.dfp_split_users_stage.DFPSplitUsersStage` stage receive | `only_users` | `List[str]` or `None` | Limit records to a specific list of users, when `include_generic` is `True` the generic user's records will also be limited to the users in this list. Mutually exclusive with `skip_users`. | #### Rolling Window Stage (`DFPRollingWindowStage`) -The {py:obj}`~dfp.stages.dfp_rolling_window_stage.DFPRollingWindowStage` stage performs several key pieces of functionality for DFP. +The {py:obj}`~morpheus_dfp.stages.dfp_rolling_window_stage.DFPRollingWindowStage` stage performs several key pieces of functionality for DFP. 1. This stage keeps a moving window of logs on a per user basis @@ -299,7 +299,7 @@ The {py:obj}`~dfp.stages.dfp_rolling_window_stage.DFPRollingWindowStage` stage p > **Note:** this stage computes a row hash for the first and last rows of the incoming `DataFrame` as such all data contained must be hashable, any non-hashable values such as `lists` should be dropped or converted into hashable types in the `DFPFileToDataFrameStage`. #### Preprocessing Stage (`DFPPreprocessingStage`) -The {py:obj}`~dfp.stages.dfp_preprocessing_stage.DFPPreprocessingStage` stage, the actual logic of preprocessing is defined in the `input_schema` argument. Since this stage occurs in the pipeline after the `DFPFileBatcherStage` and `DFPSplitUsersStage` stages all records in the incoming `DataFrame` correspond to only a single user within a specific time period allowing for columns to be computer on a per-user per-time period basis such as the `logcount` and `locincrement` features mentioned above. Making the type of processing performed in this stage different from those performed in the `DFPFileToDataFrameStage`. +In the {py:obj}`~morpheus_dfp.stages.dfp_preprocessing_stage.DFPPreprocessingStage` stage, the actual logic of preprocessing is defined in the `input_schema` argument. Since this stage occurs in the pipeline after the `DFPFileBatcherStage` and `DFPSplitUsersStage` stages, all records in the incoming `DataFrame` correspond to only a single user within a specific time period, allowing for columns to be computed on a per-user, per-time-period basis, such as the `logcount` and `locincrement` features mentioned above. This makes the type of processing performed in this stage different from that performed in the `DFPFileToDataFrameStage`. | Argument | Type | Description | | -------- | ---- | ----------- | @@ -316,7 +316,7 @@ After training the generic model, individual user models can be trained.
Individ ### Training Stages #### Training Stage (`DFPTraining`) -The {py:obj}`~dfp.stages.dfp_training.DFPTraining` trains a model for each incoming `DataFrame` and emits an instance of `morpheus.messages.ControlMessage` containing the trained model. +The {py:obj}`~morpheus_dfp.stages.dfp_training.DFPTraining` stage trains a model for each incoming `DataFrame` and emits an instance of `morpheus.messages.ControlMessage` containing the trained model. | Argument | Type | Description | | -------- | ---- | ----------- | @@ -326,7 +326,7 @@ The {py:obj}`~dfp.stages.dfp_training.DFPTraining` trains a model for each incom | `validation_size` | `float` | Proportion of the input dataset to use for training validation. Should be between 0.0 and 1.0. Default is 0.0.| #### MLflow Model Writer Stage (`DFPMLFlowModelWriterStage`) -The {py:obj}`~dfp.stages.dfp_mlflow_model_writer.DFPMLFlowModelWriterStage` stage publishes trained models into MLflow, skipping any model which lacked sufficient training data (current required minimum is 300 log records). +The {py:obj}`~morpheus_dfp.stages.dfp_mlflow_model_writer.DFPMLFlowModelWriterStage` stage publishes trained models into MLflow, skipping any model which lacked sufficient training data (current required minimum is 300 log records). | Argument | Type | Description | | -------- | ---- | ----------- | @@ -343,7 +343,7 @@ The {py:obj}`~dfp.stages.dfp_mlflow_model_writer.DFPMLFlowModelWriterStag ### Inference Stages #### Inference Stage (`DFPInferenceStage`) -The {py:obj}`~dfp.stages.dfp_inference_stage.DFPInferenceStage` stage loads models from MLflow and performs inferences against those models. This stage emits a message containing the original `DataFrame` along with new columns containing the z score (`mean_abs_z`), as well as the name and version of the model that generated that score (`model_version`). For each feature in the model, three additional columns will also be added: +The {py:obj}`~morpheus_dfp.stages.dfp_inference_stage.DFPInferenceStage` stage loads models from MLflow and performs inference against those models. This stage emits a message containing the original `DataFrame` along with new columns containing the z score (`mean_abs_z`), as well as the name and version of the model that generated that score (`model_version`). For each feature in the model, three additional columns will also be added: * `_loss` : The loss * `_z_loss` : The loss z-score * `_pred` : The predicted value @@ -370,4 +370,4 @@ The {py:obj}`~morpheus.stages.postprocess.filter_detections_stage.FilterDetectio | `field_name` | `str` | `probs` | Name of the tensor (`filter_source=FilterSource.TENSOR`) or DataFrame column (`filter_source=FilterSource.DATAFRAME`) to use as the filter criteria. | #### Post Processing Stage (`DFPPostprocessingStage`) -The {py:obj}`~dfp.stages.dfp_postprocessing_stage.DFPPostprocessingStage` stage adds a new `event_time` column to the DataFrame indicating the time which Morpheus detected the anomalous messages, and replaces any `NAN` values with the a string value of `'NaN'`. +The {py:obj}`~morpheus_dfp.stages.dfp_postprocessing_stage.DFPPostprocessingStage` stage adds a new `event_time` column to the DataFrame indicating the time at which Morpheus detected the anomalous messages, and replaces any `NaN` values with the string value `'NaN'`.
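One practical consequence of the path updates above: code that previously imported these stages from the `examples/digital_fingerprinting/production/morpheus/dfp` tree should now import them from the `morpheus_dfp` package. For example, following the module paths referenced in the documentation above:

```python
# Imports matching the updated morpheus_dfp package paths referenced above.
from morpheus_dfp.stages.dfp_inference_stage import DFPInferenceStage
from morpheus_dfp.stages.dfp_mlflow_model_writer import DFPMLFlowModelWriterStage
from morpheus_dfp.stages.dfp_postprocessing_stage import DFPPostprocessingStage
from morpheus_dfp.stages.dfp_rolling_window_stage import DFPRollingWindowStage
from morpheus_dfp.stages.dfp_split_users_stage import DFPSplitUsersStage
from morpheus_dfp.stages.dfp_training import DFPTraining
```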
diff --git a/docs/source/extra_info/troubleshooting.md b/docs/source/extra_info/troubleshooting.md index 5f3b14be20..4f25115ca8 100644 --- a/docs/source/extra_info/troubleshooting.md +++ b/docs/source/extra_info/troubleshooting.md @@ -48,7 +48,7 @@ Error trying to get model Traceback (most recent call last): -File "/workspace/examples/digital_fingerprinting/production/morpheus/dfp/stages/dfp_inference_stage.py", line 101, in on_data +File "/workspace/python/morpheus_dfp/morpheus_dfp/stages/dfp_inference_stage.py", line 101, in on_data loaded_model = model_cache.load_model(self._client) ``` diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md index 02e430ea6e..0ccceba847 100644 --- a/docs/source/getting_started.md +++ b/docs/source/getting_started.md @@ -376,35 +376,6 @@ Commands: validate Validate pipeline output for testing. ``` -And for the AE pipeline: - -``` -$ morpheus run pipeline-ae --help -Usage: morpheus run pipeline-ae [OPTIONS] COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]... - - - -Commands: - add-class Add detected classifications to each message. - add-scores Add probability scores to each message. - buffer (Deprecated) Buffer results. - delay (Deprecated) Delay results for a certain duration. - filter Filter message by a classification threshold. - from-azure Source stage is used to load Azure Active Directory messages. - from-cloudtrail Load messages from a CloudTrail directory. - from-duo Source stage is used to load Duo Authentication messages. - inf-pytorch Perform inference with PyTorch. - inf-triton Perform inference with Triton Inference Server. - monitor Display throughput numbers at a specific point in the pipeline. - preprocess Prepare Autoencoder input DataFrames for inference. - serialize Includes & excludes columns from messages. - timeseries Perform time series anomaly detection and add prediction. - to-file Write all messages to a file. - to-kafka Write all messages to a Kafka cluster. - train-ae Train an Autoencoder model on incoming data. - trigger Buffer data until the previous stage has completed. - validate Validate pipeline output for testing. -``` > **Note**: The available commands for different types of pipelines are not the same. This means that the same stage may have different options when used in different pipelines. Check the CLI help for the most up-to-date information during development. ## Next Steps diff --git a/thirdparty/README.md b/thirdparty/README.md new file mode 100644 index 0000000000..275ede30b7 --- /dev/null +++ b/thirdparty/README.md @@ -0,0 +1,20 @@ + + +# Source Code for OSS Packages in the NVIDIA Morpheus Docker Container + +The source code for OSS packages included in the NVIDIA Morpheus Docker image is available at [https://github.com/nv-morpheus/morpheus_third_party_oss/tree/branch-24.10](https://github.com/nv-morpheus/morpheus_third_party_oss/tree/branch-24.10).
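Since the central change in this diff is the `mlflow>=2.10.0,<2.18` pin, a quick runtime check can confirm that an environment actually satisfies it. A minimal sketch, not part of the PR, assuming the `packaging` library is available:

```python
# Verify the installed mlflow version falls inside the pinned range.
import mlflow
from packaging.version import Version

installed = Version(mlflow.__version__)
assert Version("2.10.0") <= installed < Version("2.18"), (
    f"mlflow {installed} is outside the pinned range >=2.10.0,<2.18")
```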