From 984b62b473c9968b9fefa695945b59e5194f6a8a Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Thu, 14 Sep 2023 14:11:25 +0200 Subject: [PATCH 1/7] Add validator script --- pyproject.toml | 5 ++++- src/tsdf/validator.py | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 src/tsdf/validator.py diff --git a/pyproject.toml b/pyproject.toml index 9bc1890..b77a1a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "tsdf" -version = "0.3.0" +version = "0.4.0" description = "A Python library that provides methods for encoding and decoding TSDF (Time Series Data Format) data, which allows you to easily create, manipulate and serialize TSDF files in your Python code." authors = ["Peter Kok ", "Pablo Rodríguez ", @@ -35,6 +35,9 @@ pytest-cov = "^4.0.0" pandas = "^1.5.2" matplotlib = "^3.6.3" +[tool.poetry.scripts] +validate-tsdf = "tsdf.validator:main" + [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" diff --git a/src/tsdf/validator.py b/src/tsdf/validator.py new file mode 100644 index 0000000..b5534c5 --- /dev/null +++ b/src/tsdf/validator.py @@ -0,0 +1,41 @@ +import os +import argparse +import traceback +import json +from tsdf import io + +def validate_tsdf_format(file_path): + try: + # Read the meta data (this will check for compulsory fields and such) + metadata = io.load_metadata_from_path(file_path) + + # Get the absolute path of the file and cut off the file name + abs_path = os.path.abspath(file_path) + abs_dir = os.path.dirname(abs_path) + + # Loop through all the files in the metadata + for file_name, file_metadata in metadata.items(): + + # print the file_metadata as json + # print(json.dumps(file_metadata.get_plain_tsdf_dict_copy(), indent=4)) + + # Load the binary data + binary_data = io.load_binary_from_metadata(abs_dir, file_metadata) + + # Success message + print(f"Successfully loaded binary file {file_name}, resulting 
shape: {binary_data.shape}") + + + except Exception as e: + print(f"Error while validating: {e}") + #traceback.print_exc() + +def main(): + parser = argparse.ArgumentParser(description='Validate a file content against the TSDF format.') + parser.add_argument('file_path', help='Path to the file to validate') + args = parser.parse_args() + + validate_tsdf_format(args.file_path) + +if __name__ == '__main__': + main() From 35eccfc79235a0de53968b69400e9599f4cf8bb8 Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Sat, 16 Sep 2023 22:14:05 +0200 Subject: [PATCH 2/7] Update validator to new module structure --- src/tsdf/validator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tsdf/validator.py b/src/tsdf/validator.py index b5534c5..433e640 100644 --- a/src/tsdf/validator.py +++ b/src/tsdf/validator.py @@ -2,12 +2,12 @@ import argparse import traceback import json -from tsdf import io +from tsdf import read_tsdf, read_binary def validate_tsdf_format(file_path): try: # Read the meta data (this will check for compulsory fields and such) - metadata = io.load_metadata_from_path(file_path) + metadata = read_tsdf.load_metadata_from_path(file_path) # Get the absolute path of the file and cut off the file name abs_path = os.path.abspath(file_path) @@ -20,7 +20,7 @@ def validate_tsdf_format(file_path): # print(json.dumps(file_metadata.get_plain_tsdf_dict_copy(), indent=4)) # Load the binary data - binary_data = io.load_binary_from_metadata(abs_dir, file_metadata) + binary_data = read_binary.load_binary_from_metadata(abs_dir, file_metadata) # Success message print(f"Successfully loaded binary file {file_name}, resulting shape: {binary_data.shape}") From 102417589a2362e6d9a2a21c7b6fcadef07a0981 Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Sat, 16 Sep 2023 22:17:18 +0200 Subject: [PATCH 3/7] Make code more readable, clean up --- docs/processing_example.ipynb | 195 +++++++++++++++------------------- 1 file changed, 86 insertions(+), 109 deletions(-) diff 
--git a/docs/processing_example.ipynb b/docs/processing_example.ipynb index 7d42ee8..fe9a7b6 100644 --- a/docs/processing_example.ipynb +++ b/docs/processing_example.ipynb @@ -17,9 +17,7 @@ "source": [ "## How to run these examples\n", "\n", - "### Dependencies\n", - "\n", - "In order to run these examples, we'll need to use the following Python packages:\n" + "### Imports\n" ] }, { @@ -28,22 +26,26 @@ "metadata": { "tags": [] }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/peter/miniforge3/lib/python3.9/site-packages/tsdf\n" + ] + } + ], "source": [ "import os\n", "import numpy as np\n", - "import tsdf\n", - "from tsdf.constants import TestConstants as TEST_CONST" + "import tsdf" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "### Test files\n", - "\n", - "The test files used in these examples can be found in [`./tests/data/`](https://github.com/biomarkersParkinson/tsdf/tree/main/tests/data). 
Use the snippet below to locate your copy of the files:" + "### Set data location" ] }, { @@ -52,17 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Reload modules automatically on changes; useful for developing\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Required import" + "data_dir = \"../tests/data\"" ] }, { @@ -70,10 +62,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Process an existing binary file and write the new TSDF metadata\n", + "## Process an existing binary file and write the new data\n", "Read and process an existing binary data (accompanied by the TSDF metadata), process the data and save it in the new format, with the corresponding TSDF metadata file.\n", "\n", - "### Load dummy data and see its format" + "### Load dummy data" ] }, { @@ -92,12 +84,19 @@ } ], "source": [ + "# The name of the data\n", "data_name = \"example_10_3_int16\"\n", "\n", - "metadata = tsdf.load_metadata_from_path(\n", - " os.path.join(TEST_CONST.TEST_DATA_DIR, data_name + TEST_CONST.METADATA_EXTENSION)\n", - ")[data_name + TEST_CONST.BINARY_EXTENSION]\n", + "# Multiple metadata files (for each binary) are loaded into a dictionary\n", + "metadata_dict = tsdf.load_metadata_from_path(f\"{data_dir}/{data_name}_meta.json\")\n", + "\n", + "# Retrieve the metadata object we want, using the name of the binary as key\n", + "metadata = metadata_dict[f\"{data_name}.bin\"]\n", + "\n", + "# Load the data\n", "data = metadata.load_binary()\n", + "\n", + "# Print some info\n", "print(f\"Data type used for storing:\\t {data.dtype}\")\n", "print(f\"Data dimensions:\\t\\t {data.shape}\")\n", "print(f\"Number of rows:\\t\\t\\t {data.shape[0]}\")" @@ -108,7 +107,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Perform light data processing" + "### Perform basic data processing" ] }, { @@ -127,7 +126,10 @@ } ], "source": [ + "# Perform an operation, 
resulting in a different data type\n", "processed_data_1 = (data / 10).astype('float32')\n", + "\n", + "# Print some info\n", "print(f\"Data type used for storing:\\t {processed_data_1.dtype}\")\n", "print(f\"Data dimensions:\\t\\t {processed_data_1.shape}\")\n", "print(f\"Number of rows:\\t\\t\\t {processed_data_1.shape[0]}\")" @@ -148,13 +150,16 @@ "metadata": {}, "outputs": [], "source": [ + "# The new name of the file\n", "processed_data_name_1 = \"tmp_test_example_10_3_int16_to_float32\"\n", + "\n", + "# Write the data to a new binary file\n", "processed_metadata_1 = tsdf.write_binary_file(\n", - " TEST_CONST.TEST_OUTPUT_DATA_DIR,\n", - " processed_data_name_1 + TEST_CONST.BINARY_EXTENSION,\n", - " processed_data_1,\n", - " metadata.get_plain_tsdf_dict_copy(),\n", - " )" + " data_dir,\n", + " f\"{processed_data_name_1}.bin\",\n", + " processed_data_1,\n", + " metadata.get_plain_tsdf_dict_copy(),\n", + " )" ] }, { @@ -162,9 +167,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Write the TSDF metadata file that describes the processed binary data format\n", - "\n", - "#### 1) Write the metadata file for a single binary file" + "### Write the TSDF metadata file" ] }, { @@ -174,7 +177,7 @@ "outputs": [], "source": [ "# Write new metadata file\n", - "tsdf.write_metadata([processed_metadata_1], processed_data_name_1 + TEST_CONST.METADATA_EXTENSION)\n" + "tsdf.write_metadata([processed_metadata_1], f\"{processed_data_name_1}_meta.json\")" ] }, { @@ -182,7 +185,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 2) Write a metadata file that combines multiple binary files" + "### Write a metadata file that combines multiple binary files" ] }, { @@ -198,12 +201,11 @@ "updated_metadata = metadata.get_plain_tsdf_dict_copy()\n", "updated_metadata.pop(\"scale_factors\") # remove the 'scale_factors'\n", "\n", - "\n", "# Save the new binary file\n", "processed_data_name_2 = \"tmp_test_example_10_3_int16_to_int32\"\n", "processed_metadata_2 = 
tsdf.write_binary_file(\n", - " TEST_CONST.TEST_OUTPUT_DATA_DIR,\n", - " processed_data_name_2 + TEST_CONST.BINARY_EXTENSION,\n", + " data_dir,\n", + " f\"{processed_data_name_2}_.bin\",\n", " processed_data_2,\n", " updated_metadata,\n", ")\n", @@ -220,8 +222,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Genarate a new binary file and the corresponding TSDF metadata\n", - "Generate binary data and save it and the corresponding TSDF metadata file." + "## Generate and save data from scratch" ] }, { @@ -230,69 +231,35 @@ "metadata": {}, "outputs": [], "source": [ + "# Generate random data\n", "rs = np.random.RandomState(seed=42)\n", "data_1 = rs.rand(17, 1).astype(np.float32)\n", "data_2 = rs.rand(15, 2).astype(np.int16)\n", "data_3 = rs.rand(10, 3).astype(np.int16)\n", "\n", - "\n", - "# An example where the metadata is defined from scratch\n", - "new_metadata = {}\n", - "new_metadata[\"subject_id\"] = \"example\"\n", - "new_metadata[\"study_id\"] = \"example\"\n", - "new_metadata[\"device_id\"] = \"example\"\n", - "new_metadata[\"endianness\"] = \"little\"\n", - "new_metadata[\"metadata_version\"] = \"0.1\"\n", - "new_metadata[\"start_datetime_unix_ms\"] = (1571135957025,)\n", - "new_metadata[\"start_iso8601\"] = \"2019-10-15T10:39:17.025000+00:00\"\n", - "new_metadata[\"end_datetime_unix_ms\"] = 1571168851826\n", - "new_metadata[\"end_iso8601\"] = \"2019-10-15T19:47:31.826000+00:00\"\n", - "new_metadata[\"channels\"] = [\"x\", \"y\", \"z\"]\n", - "new_metadata[\"units\"] = [\"m/s/s\", \"m/s/s\", \"m/s/s\"]\n", + "# Define the metadata\n", + "new_metadata = {\n", + " \"subject_id\": \"example\",\n", + " \"study_id\": \"example\",\n", + " \"device_id\": \"example\",\n", + " \"endianness\": \"little\",\n", + " \"metadata_version\": \"0.1\",\n", + " \"start_datetime_unix_ms\": 1571135957025,\n", + " \"start_iso8601\": \"2019-10-15T10:39:17.025000+00:00\",\n", + " \"end_datetime_unix_ms\": 1571168851826,\n", + " \"end_iso8601\": 
\"2019-10-15T19:47:31.826000+00:00\",\n", + " \"channels\": [\"x\", \"y\", \"z\"],\n", + " \"units\": [\"m/s/s\", \"m/s/s\", \"m/s/s\"]\n", + "}\n", "\n", "# Write the three binary files based on the provided metadata\n", - "\n", "file_prefix = \"tmp_test\"\n", - "new_meta_1 = tsdf.write_binary_file(\n", - " TEST_CONST.TEST_OUTPUT_DATA_DIR, file_prefix + \"_1.bin\", data_1, new_metadata\n", - ")\n", - "new_meta_2 = tsdf.write_binary_file(\n", - " TEST_CONST.TEST_OUTPUT_DATA_DIR, file_prefix + \"_2.bin\", data_2, new_metadata\n", - ")\n", - "new_meta_3 = tsdf.write_binary_file(\n", - " TEST_CONST.TEST_OUTPUT_DATA_DIR, file_prefix + \"_3.bin\", data_3, new_metadata\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Save the metadata that corresponds to the binary data. In case of multiple binary files, the corresponding metadata files have to be combined." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "# Write the first metadata file\n", - "tsdf.write_metadata([new_meta_1], file_prefix + \"_1\" + TEST_CONST.METADATA_EXTENSION)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "# Combine and write all metadata files\n", - "tsdf.write_metadata(\n", - " [new_meta_1, new_meta_2, new_meta_3],\n", - " file_prefix + \"_3\" + TEST_CONST.METADATA_EXTENSION,\n", - ")" + "new_meta_1 = tsdf.write_binary_file(data_dir, f\"{file_prefix}_1.bin\", data_1, new_metadata)\n", + "new_meta_2 = tsdf.write_binary_file(data_dir, f\"{file_prefix}_2.bin\", data_2, new_metadata)\n", + "new_meta_3 = tsdf.write_binary_file(data_dir, f\"{file_prefix}_3.bin\", data_3, new_metadata)\n", + "\n", + "# Write the metadata file, which references the three binary files\n", + "tsdf.write_metadata([new_meta_1, new_meta_2, new_meta_3], f\"{file_prefix}_meta.json\")" ] }, { @@ -306,7 +273,7 @@ }, { "cell_type": "code", - 
"execution_count": 11, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -317,10 +284,8 @@ ")\n", "\n", "# Path to the metadata file\n", - "path_to_file = os.path.join(TEST_CONST.TEST_DATA_DIR, \"ppp_format_meta_legacy.json\")\n", - "path_to_new_file = os.path.join(\n", - " TEST_CONST.TEST_OUTPUT_DATA_DIR, \"tmp_ppp_format_meta.json\"\n", - ")\n", + "path_to_file = os.path.join(data_dir, \"ppp_format_meta_legacy.json\")\n", + "path_to_new_file = os.path.join(data_dir, \"tmp_ppp_format_meta.json\")\n", "\n", "# Generate a TSDF metadata file from TSDB\n", "generate_tsdf_metadata_from_tsdb(path_to_file, path_to_new_file)\n", @@ -337,20 +302,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Verify TSDF format\n", - "Method used to verify formatting of existing files." + "## Validate TSDF file\n", + "\n", + "Files can be validated using the validator module, which is also callable from the command line. The validator checks the metadata file and inspects whether the binary file is consistent with the metadata. This snippet shows how to use the validator from code." 
] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 10, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Successfully loaded binary file ppp_format_time.bin, resulting shape: (17,)\n", + "Successfully loaded binary file ppp_format_samples.bin, resulting shape: (17, 6)\n" + ] + } + ], "source": [ - "# from tsdf import validator\n", - "path_to_metadata_file = os.path.join(TEST_CONST.TEST_DATA_DIR, \"ppp_format_meta.json\")\n", + "# Import the validator\n", + "from tsdf import validator\n", + "\n", "# Verify the metadata file\n", - "#validator.validate_tsdf_format(path_to_metadata_file)" + "path_to_metadata_file = os.path.join(data_dir, \"ppp_format_meta.json\")\n", + "validator.validate_tsdf_format(path_to_metadata_file)" ] } ], From d89ce683c0e3f5ed63c241f9c747064a9742d133 Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Sun, 17 Sep 2023 11:26:43 +0200 Subject: [PATCH 4/7] Clean up docs --- docs/contact.md | 2 ++ docs/index.md | 10 +++--- docs/installation.md | 22 +++---------- docs/processing_example.ipynb | 1 + docs/tsdf_fields_table.md | 6 ++-- docs/usage.ipynb | 60 ----------------------------------- mkdocs.yml | 1 - 7 files changed, 14 insertions(+), 88 deletions(-) delete mode 100644 docs/usage.ipynb diff --git a/docs/contact.md b/docs/contact.md index 5aed0a0..e104928 100644 --- a/docs/contact.md +++ b/docs/contact.md @@ -1,5 +1,7 @@ # Contact +More information about the TSDF format can be found in the [TSDF preprint](https://arxiv.org/abs/2211.11294). + This package has been written by engineers from the [Netherlands eScience Center](https://esciencecenter.nl). It is maintained by: diff --git a/docs/index.md b/docs/index.md index b152c90..a2e19d0 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,8 +1,8 @@ -# Welcome to tsdf +# Welcome to the TSDF (Time Series Data Format) Python package -A package to load [TSDF data](https://arxiv.org/abs/2211.11294) into Python. 
+A package to work with TSDF data in Python. This implementation is based on the TSDF format specification, which can be found in this [preprint](https://arxiv.org/abs/2211.11294). -## What is `TSDF data`? +## What is TSDF data? `tsdf` stands for _`time series data format`_. It is a unified, standardized format for storing all types of physiological sensor data. It was originally introduced in this [preprint](https://arxiv.org/abs/2211.11294). @@ -10,9 +10,9 @@ It is a unified, standardized format for storing all types of physiological sens TSDF provides a unified, user-friendly format for both numerical sensor data and metadata, utilizing raw binary data and JSON-format text files for measurements/timestamps and metadata, respectively. It defines essential metadata fields to enhance data interpretability and exchangeability, aiming to bolster scientific reproducibility in studies reliant on digital biosensor data as a critical evidence base across various disease domains. -## Example: TSDF Metadata JSON +## Example: TSDF Metadata -This example demonstrates a TSDF metadata file, showcasing the structured format used to easily interpret and read the corresponding binary data. For more intricate examples and detailed specifications, the paper serves as a comprehensive reference. +This example demonstrates a TSDF metadata JSON file, showcasing the structured format used to easily interpret and read the corresponding binary data. For more intricate examples and detailed specifications, the paper serves as a comprehensive reference. ```json { diff --git a/docs/installation.md b/docs/installation.md index 4d28e3f..abaf607 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,30 +1,16 @@ # Installation -## Requirements - -`tsdf` requires a recent version of [Python](https://www.python.org/) and a Python package manager (such as `pip`). -Chances are that you have them already installed. 
-You can check if that's the case from the command line: - -```bash -python --version -``` - -```bash -pip --version -``` - ## Installing tsdf The package is available in [PyPI](https://pypi.org/project/tsdf/). The latest stable release can be installed using: ```bash -$ pip install tsdf +pip install tsdf ``` -### Installing the develop version +## Installing the development version -The source code is stored and maintained on [GitHub](https://github.com/biomarkersParkinson/tsdf). +The source code is available on [GitHub](https://github.com/biomarkersParkinson/tsdf). If you have `git` installed, the latest version of tsdf can be installed by typing: @@ -41,4 +27,4 @@ Otherwise you can install it manually by following these steps: ## What now? Now you can import functions from the `tsdf` Python package. -See some examples in the next section. \ No newline at end of file +See some examples in the next section. diff --git a/docs/processing_example.ipynb b/docs/processing_example.ipynb index fe9a7b6..9551e07 100644 --- a/docs/processing_example.ipynb +++ b/docs/processing_example.ipynb @@ -88,6 +88,7 @@ "data_name = \"example_10_3_int16\"\n", "\n", "# Multiple metadata files (for each binary) are loaded into a dictionary\n", + "# mapping the binary file name to the metadata object\n", "metadata_dict = tsdf.load_metadata_from_path(f\"{data_dir}/{data_name}_meta.json\")\n", "\n", "# Retrieve the metadata object we want, using the name of the binary as key\n", diff --git a/docs/tsdf_fields_table.md b/docs/tsdf_fields_table.md index 6c1fb66..2cc6b89 100644 --- a/docs/tsdf_fields_table.md +++ b/docs/tsdf_fields_table.md @@ -1,8 +1,6 @@ -# `TSDF` `data` fields +# TSDF fields - -The key element of `TSDF` `data` is a metadata dictionary. -In this section, we will comprehensively explore the mandatory and optional fields within the TSDF format, providing insights into its structured data organization. +TSDF metadata is represented as a dictionary. 
In this section, we will comprehensively list the mandatory and optional fields within the TSDF format. ## TSDF mandatory fields diff --git a/docs/usage.ipynb b/docs/usage.ipynb deleted file mode 100644 index f0daf6f..0000000 --- a/docs/usage.ipynb +++ /dev/null @@ -1,60 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Usage \n", - "\n", - "Input/Output of `tsdf` files (work in progress)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using `json`" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from tsdf.io import load, loads\n", - "\n", - "with open(\"../tests/data/flat.json\", \"r\") as read_file:\n", - " data = load(read_file)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.10.8 ('tsdf')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "237a52c53308e1ad95829edf2d78ec7874ed6722f9691e1e5c1903f01554073b" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/mkdocs.yml b/mkdocs.yml index 40c49d5..b148011 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -14,5 +14,4 @@ nav: - TSDF fields: tsdf_fields_table.md - Installation: installation.md - Example: processing_example.ipynb - - Usage: usage.ipynb - Contact: contact.md From 7989d963653c028d19b42fccabdf2766494d5ff7 Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Sun, 17 Sep 2023 11:33:20 +0200 Subject: [PATCH 5/7] More docs --- Makefile | 11 ----------- README.md | 10 ++++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) delete mode 100644 Makefile diff --git a/Makefile b/Makefile deleted file mode 
100644 index 4da9099..0000000 --- a/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -.PHONY: build serve gh-deploy - -# ===== Documentation ===== -build: - mkdocs build - -serve: - mkdocs serve - -gh-deploy: - mkdocs gh-deploy \ No newline at end of file diff --git a/README.md b/README.md index 420dfb8..a76e2a2 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,16 @@ poetry install poetry run pytest ``` +### Building the documentation + +We use [mkdocs](https://www.mkdocs.org/) to build the documentation. If you want to build the documentation locally, the following commands will prove useful: + +```bash +mkdocs build # build the documentation +mkdocs serve # serve the documentation on a local server +mkdocs gh-deploy # deploy the documentation to GitHub pages +``` + ## Contributing Interested in contributing? Check out the contributing guidelines. Please note that this project is released with a Code of Conduct. By contributing to this project, you agree to abide by its terms. From 596aabc3de961153746526e483d40dc291d37224 Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Sun, 17 Sep 2023 21:12:47 +0200 Subject: [PATCH 6/7] Tests for validator --- src/tsdf/validator.py | 9 ++++++++- tests/test_validator.py | 13 +++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 tests/test_validator.py diff --git a/src/tsdf/validator.py b/src/tsdf/validator.py index 433e640..3bb7a41 100644 --- a/src/tsdf/validator.py +++ b/src/tsdf/validator.py @@ -25,17 +25,24 @@ def validate_tsdf_format(file_path): # Success message print(f"Successfully loaded binary file {file_name}, resulting shape: {binary_data.shape}") + return True except Exception as e: print(f"Error while validating: {e}") #traceback.print_exc() + return False def main(): + # Parse the arguments parser = argparse.ArgumentParser(description='Validate a file content against the TSDF format.') parser.add_argument('file_path', help='Path to the file to validate') args = parser.parse_args() - 
validate_tsdf_format(args.file_path) + # Perform validation + is_valid = validate_tsdf_format(args.file_path) + + # Exit with error code 1 if the validation failed + exit(0 if is_valid else 1) if __name__ == '__main__': main() diff --git a/tests/test_validator.py b/tests/test_validator.py new file mode 100644 index 0000000..4bf9340 --- /dev/null +++ b/tests/test_validator.py @@ -0,0 +1,13 @@ +import unittest +from tsdf.constants import TestConstants as CONST +from tsdf import validator + +class TestValidator(unittest.TestCase): + + def test_validate_valid_file(self): + result = validator.validate_tsdf_format(CONST.TEST_DATA_FILES["ppp"]) + self.assertTrue(result) + + def test_validate_invalid_file(self): + result = validator.validate_tsdf_format(CONST.TEST_DATA_FILES["missingkey"]) + self.assertFalse(result) From 27c35667361fa883f3ab8f2da67ed6165868304c Mon Sep 17 00:00:00 2001 From: Peter Kok Date: Sun, 17 Sep 2023 21:14:36 +0200 Subject: [PATCH 7/7] Minor cleanup --- docs/processing_example.ipynb | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/docs/processing_example.ipynb b/docs/processing_example.ipynb index 9551e07..017e94f 100644 --- a/docs/processing_example.ipynb +++ b/docs/processing_example.ipynb @@ -26,15 +26,7 @@ "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/Users/peter/miniforge3/lib/python3.9/site-packages/tsdf\n" - ] - } - ], + "outputs": [], "source": [ "import os\n", "import numpy as np\n",