diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 0f0ce905..e7c0cb8f 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -22,13 +22,16 @@ Progress:
 - [ ] Test passes
 
 ## To Reproduce
+
 Steps to reproduce the behavior:
+
 1. Go to '...'
 2. Click on '....'
 3. Scroll down to '....'
 4. See error
 
 ### Failing tests
+
 - [ ] No applicable test failed, need to create.
 - [ ] Other (please specify)
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 12a63183..d0471595 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -6,6 +6,7 @@ labels: enhancement
 assignees: ''
 ---
 
 ### User Story
+
 *As a ...insert type of user... I'd like to ...insert desired feature or behavior...*
 
 Progress:
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 4d1efa8f..5ad5877b 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,27 +1,23 @@
 ## What existing problem does the pull request solve and why should we include it?
 
-
 ## What is the testing plan?
 
 *Demonstrate the code is solid by discussing how results are verified and covered by tests*
 
- - [ ] Code for this PR is covered in tests
- - [ ] Code passes all existing tests
+- [ ] Code for this PR is covered in tests
+- [ ] Code passes all existing tests
 
 ## Code formatting
+
 *Code should be PEP8 compliant before merging by running a package like [`black`](https://pypi.org/project/black/)*
+*See the formatting example at the end of this template.*
 
- - [ ] Code linted
+- [ ] Code linted
 
 ## Applicable Issues
+
 *Please do not create a Pull Request without creating an issue first.*
 *Put `closes #XXXX` in your comment to auto-close the issue that your PR fixes.*
-
-#### Issues List
-
- - closes...
- - closes...
-
-
+- closes...
+- closes...
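+
+## Formatting example
+
+*A minimal sketch of the formatting step above (assumes `black` is installed; `--check` only reports, the bare command rewrites files):*
+
+```bash
+pip install black
+black --check .  # list files that would be reformatted
+black .          # apply formatting in place
+```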
diff --git a/.github/workflows/clean.yml b/.github/workflows/clean.yml
new file mode 100644
index 00000000..02a7945f
--- /dev/null
+++ b/.github/workflows/clean.yml
@@ -0,0 +1,30 @@
+name: Clean Docs for Deleted References
+on:
+  delete:
+
+env:
+  PYTHON_VERSION: 3.8
+
+jobs:
+  clean:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install mike
+      - name: Configure Git user
+        run: |
+          git config --local user.email "github-actions[bot]@users.noreply.github.com"
+          git config --local user.name "github-actions[bot]"
+      - name: Delete defunct docs versions
+        run: |
+          echo "Deleting ${{ github.event.ref }} version from docs"
+          mike delete --rebase --push ${{ github.event.ref }}
diff --git a/.github/workflows/flake.yml b/.github/workflows/flake.yml
new file mode 100644
index 00000000..12d48844
--- /dev/null
+++ b/.github/workflows/flake.yml
@@ -0,0 +1,14 @@
+name: pre-commit
+
+on:
+  pull_request:
+  push:
+    branches: [main, develop]
+
+jobs:
+  pre-commit:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - uses: pre-commit/action@v2.0.3
diff --git a/.github/workflows/test.yml b/.github/workflows/push.yml
similarity index 77%
rename from .github/workflows/test.yml
rename to .github/workflows/push.yml
index e4030b74..db3f7eaa 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/push.yml
@@ -1,4 +1,4 @@
-name: Python package
+name: Push Workflow
 
 on: [push]
 
@@ -9,10 +9,14 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'skip ci')"
     strategy:
       matrix:
-        python-version: [3.7, 3.8]
+        python-version: [3.8]
 
     steps:
      - uses: actions/checkout@v2
+       with:
+         fetch-depth: 0
+     - name: Document branch
+       run: echo ${{ github.ref_name }}
     - name: Set up Python ${{ matrix.python-version }}
       uses: actions/setup-python@v2
       with:
@@ -20,9 +24,13 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
+        sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable
+        sudo apt-get update
+        sudo apt-get install gdal-bin libgdal-dev
+        pip install GDAL==3.2.3
        pip install -r requirements.txt
        pip install -r dev-requirements.txt
-    - name: Lint with flake8
+    - name: Lint
      run: |
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000..574dd6d0
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,69 @@
+name: Release Workflow
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  build-test-deploy:
+
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: [3.7, 3.8]
+    env:
+      DEPLOY_TARGET: ${{ matrix.python-version == '3.8' }}
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Document python/os version
+        run: |
+          echo "Python V ${{ matrix.python-version }}"
+          echo "Targeted Deployment Combo? $DEPLOY_TARGET"
+      - name: Document branch
+        run: echo ${{ github.ref_name }}
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+          pip install -r dev-requirements.txt
+      - name: Lint
+        run: |
+          # stop the build if there are Python syntax errors or undefined names
+          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+      - name: Install package
+        run: |
+          pip install -e .
+      - name: Test with pytest
+        run: |
+          pytest -s -m "not skipci"
+      - name: Configure Git user
+        if: ${{ env.DEPLOY_TARGET == 'true' }}
+        run: |
+          git config --local user.email "github-actions[bot]@users.noreply.github.com"
+          git config --local user.name "github-actions[bot]"
+      - name: Build docs
+        if: ${{ env.DEPLOY_TARGET == 'true' }}
+        run: |
+          mike deploy --push --rebase --update-aliases ${{ github.ref_name }} latest
+      - name: Install deployment dependencies
+        if: ${{ env.DEPLOY_TARGET == 'true' }}
+        run: |
+          python -m pip install --upgrade pip
+          pip install setuptools wheel twine
+      - name: Publish to PyPI
+        if: ${{ env.DEPLOY_TARGET == 'true' }}
+        env:
+          TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
+          TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
+        run: |
+          python setup.py sdist bdist_wheel
+          twine upload dist/*
diff --git a/.gitignore b/.gitignore
index 07a12d10..d53a61c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,7 +18,7 @@ dist/
 downloads/
 eggs/
 .eggs/
-lib/
+#lib/
 lib64/
 parts/
 sdist/
@@ -41,9 +41,19 @@ MANIFEST
 pip-log.txt
 pip-delete-this-directory.txt
 *.log
+
 # except logs in the example
 !examples/**/*.log
 
+# downloaded example files
+examples/example_union_test_highway.zip
+examples/UnionCity/demand_matrices/
+examples/UnionCity/emme_project/
+examples/UnionCity/inputs/
+examples/UnionCity/ref_skim_matrices/
+examples/UnionCity/skim_matrices
+examples/UnionCity/
+
 #temp files
 tests/scratch/*
 !tests/scratch/readme.md
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
index 0b1d6a3d..888c8888 100644
--- a/.markdownlint.yaml
+++ b/.markdownlint.yaml
@@ -5,5 +5,17 @@ default: true
 MD007:
   indent: 4
 
-# Remove line length limit
-MD013: false
\ No newline at end of file
+# Remove line length limit
+MD013: false
+
+# Allow us to skip levels of headings
+MD001: false
+
+# Allow us to use emphasis on whole lines.
+MD036: false
+
+# Allow us to use lower-level headings to start files
+MD041: false
+
+# Fenced code blocks in tabbed views would otherwise flag this rule
+MD046: false
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..b5d44465
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,47 @@
+default_stages: [commit]
+repos:
+  - repo: https://github.com/psf/black
+    rev: 22.3.0
+    hooks:
+      - id: black
+        language_version: python3
+      - id: black-jupyter
+  - repo: https://github.com/PyCQA/pydocstyle
+    rev: 6.1.1
+    hooks:
+      - id: pydocstyle
+        stages: [manual]
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.4.0
+    hooks:
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+      - id: check-added-large-files
+      - id: check-json
+      - id: check-toml
+      - id: check-yaml
+        args: [--unsafe]
+      - id: requirements-txt-fixer
+      - id: check-executables-have-shebangs
+  - repo: https://github.com/igorshubovych/markdownlint-cli
+    rev: v0.27.1
+    hooks:
+      - id: markdownlint
+        stages: [manual]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.10.1
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/python-jsonschema/check-jsonschema
+    rev: 0.14.3
+    hooks:
+      - id: check-github-workflows
+      - id: check-github-actions
+  - repo: https://github.com/pycqa/flake8
+    rev: 4.0.1
+    hooks:
+      - id: flake8
+        stages: [manual]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5d57fef3..285cd891 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,7 @@
 # Changelog
 
 ## Version (date)
+
 - a list
 - of things
-- that have changed
\ No newline at end of file
+- that have changed
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 00000000..f5266918
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,36 @@
+# Helps auto-assign code reviews for pull requests
+# The last match will 'win'
+
+# Global owners
+* @lmz @FlaviaTsang
+
+# DevOps / Quality Assurance
+/.github/ @e-lo @lmz
+setup.config @e-lo @lmz
+pre-commit-config.yaml @e-lo @lmz
+.markdownlint.yaml @e-lo @lmz
+
+# Documentation Setup
+/docs/ @e-lo @lmz
+mkdocs.yml @e-lo @lmz
+
+# Documentation Content
+*.md @lmz @FlaviaTsang
+
+# Tests should be reviewed by dev team
+pytest.ini @e-lo @inrokevin @lmz
+/tests/ @e-lo @inrokevin @lmz
+
+# EMME related things should be reviewed by INRO
+/tm2py/emme/ @inrokevin
+
+# Packaging
+setup.py @e-lo @lmz
+*requirements*.txt @inrokevin @lmz
+manifest.in @e-lo @lmz
+environment.yml @inrokevin @lmz
+Dockerfile @inrokevin @lmz
+/bin/ @inrokevin @lmz @e-lo
+
+# Examples
+/examples/ @FlaviaTsang @i-am-sijia
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5eee1c80..63e8e14c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,2 +1,3 @@
 # Contribution Guide
 
+Please refer to the contribution documentation website: <https://bayareametro.github.io/tm2py/contributing>
diff --git a/Dockerfile b/Dockerfile
index 283229c3..c93892ca 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
 # docker build -t mypackage .
-# docker run --rm -v "$PWD":/home/jovyan/work mypackage /bin/bash
+# docker run --rm -v "$PWD":/home/jovyan/work mypackage /bin/bash
 FROM jupyter/minimal-notebook
 
 COPY ../requirements.txt /tmp/requirements.txt
@@ -28,4 +28,4 @@ RUN conda clean --all --yes && \
 WORKDIR /home/jovyan/work
 
 # set default command to launch when container is run
-CMD ["jupyter", "lab", "--ip='0.0.0.0'", "--port=8888", "--no-browser", "--NotebookApp.token=''", "--NotebookApp.password=''"]
\ No newline at end of file
+CMD ["jupyter", "lab", "--ip='0.0.0.0'", "--port=8888", "--no-browser", "--NotebookApp.token=''", "--NotebookApp.password=''"]
diff --git a/README.md b/README.md
index dee52204..c821a1d9 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,27 @@
 # Travel Model 2 Python Package
 
-[![Tests](https://github.com/BayAreaMetro/tm2py/actions/workflows/test.yml/badge.svg)](https://github.com/BayAreaMetro/tm2py/actions/workflows/test.yml)
+A Python package to run the San Francisco Bay Area's Travel Model.
 
-[![Documentation](https://github.com/BayAreaMetro/tm2py/actions/workflows/docs.yml/badge.svg)](https://github.com/BayAreaMetro/tm2py/actions/workflows/docs.yml)
+**Owner:** Metropolitan Transportation Commission (MTC)
 
-[![Package Published](https://github.com/BayAreaMetro/tm2py/actions/workflows/publish.yml/badge.svg)](https://github.com/BayAreaMetro/tm2py/actions/workflows/publish.yml)
+[![Tests](https://github.com/BayAreaMetro/tm2py/actions/workflows/test.yml/badge.svg?branch=develop)](https://github.com/BayAreaMetro/tm2py/actions/workflows/test.yml)
 
-## Installation
+[![Documentation](https://github.com/BayAreaMetro/tm2py/actions/workflows/docs.yml/badge.svg?branch=develop)](https://github.com/BayAreaMetro/tm2py/actions/workflows/docs.yml)
 
-If you are managing multiple python versions, we suggest using [`virtualenv`](https://virtualenv.pypa.io/en/latest/) or [`conda`](https://conda.io/en/latest/) virtual environments.
+[![Package Published](https://github.com/BayAreaMetro/tm2py/actions/workflows/publish.yml/badge.svg?branch=develop)](https://github.com/BayAreaMetro/tm2py/actions/workflows/publish.yml)
 
-The following instructions create and activate a conda environment (recommended) in which you can install:
+## Installation
 
-```bash
-conda env create -f environment.yml
-conda activate tm2py
-```
+We recommend installing into a virtual environment.
 
-Basic installation instructions are as follows:
+Stable (to come - use bleeding edge for now):
 
 ```bash
 pip install tm2py
 ```
 
-#### Bleeding Edge
-If you want to install a more up-to-date or development version, you can do so by installing it from the `develop` branch as follows:
+Bleeding edge:
+TODO: Which environment is this? Does it still work for anyone?
 
 ```bash
 conda env create -f environment.yml
@@ -32,33 +29,34 @@ conda activate tm2py
 pip install git+https://github.com/bayareametro/tm2py@develop
 ```
 
-#### Developers (from clone)
-If you are going to be working on Lasso locally, you might want to clone it to your local machine and install it from the clone. The -e will install it in [editable mode](https://pip.pypa.io/en/stable/reference/pip_install/?highlight=editable#editable-installs).
+The above directions didn't work for the MTC Windows environment. The following method did work on a machine with Emme-4.6.0 installed. This required a compiled GDAL/Fiona package set for python 3.7, which was downloaded from [Christoph Gohlke's Unofficial Windows Binaries for Python Extension Packages](https://www.lfd.uci.edu/~gohlke/pythonlibs/), consisting of the following:
 
+1. GDAL-3.4.1-cp37-cp37m-win_amd64.whl
+2. pyproj-3.2.1-cp37-cp37m-win_amd64.whl
+3. Fiona-1.8.20-cp37-cp37m-win_amd64.whl
+4. Shapely-1.8.0-cp37-cp37m-win_amd64.whl
+5. geopandas-0.10.2-py2.py3-none-any.whl
 
-```bash
-conda env create -f environment.yml
+With these files in hand, the following installation instructions work:
+
+```bat
+conda create -n tm2py python=3.7.6
 conda activate tm2py
-git clone https://github.com/bayareametro/tm2py
-cd tm2py
+pip install [the packages listed above, in that order]
+cd
 pip install -e .
+conda env config vars set GDAL_VERSION=3.4.1
 ```
 
+Finally, install the Emme python packages using the Emme GUI. This effectively creates a file,
+`C:\Users\%USERNAME%\.conda\envs\tm2py\Lib\site-packages\emme.pth` with the following contents, so you could create the file yourself.
 
-Note that you'll also need to install Emme's python packages into this conda environment.
-Following these instructions from an INRO community forum post: In the Emme Desktop application, open Tools->Application Options->Modeller, change your Python path as desired and click the "Install Modeller Package" button.
-
-If this is successful, the following packages will be visible in your environment when you type `pip list`:
-* inro-dynameq
-* inro-emme
-* inro-emme-agent
-* inro-emme-engine
-* inro-modeller
+```python
+import os, site; site.addsitedir("C:/Program Files/INRO/Emme/Emme 4/Emme-4.6.0/Python37/Lib/site-packages")
+```
 
-Note that doing the emme package install will also install the package *pywin32*; if *pywin32* gets installed by other means (like
-conda or pip), then I got DLL load errors when tryring to import the emme packages, so I recommend uninstalling *pywin32* before
-installing the emme packages.
+In troubleshooting, DLL load failure errors would sometimes occur; these may be resolved by importing gdal before importing the Emme packages (a sketch of this workaround follows the Example Data section below). Emme support explained this thusly:
 
-## Basic Usage
+At load time, the EMME API will always load the geos_c co-located with the EMME API, unless it was already loaded from some other location, which is the case when you import GDAL first. EMME API seems to be compatible with the newer GDAL/geos_c (reminder: not tested!). But this does not appear to be the case the other way around (newer GDAL is not compatible with older geos_c).
 
 Copy and unzip [example_union_test_highway.zip](https://mtcdrive.box.com/s/3entr016e9teq2wt46x1os3fjqylfoge) to a local drive and from within that directory run:
 
@@ -68,6 +66,47 @@ get_test_data
 tm2py -s scenario.toml -m model.toml
 ```
 
+See [starting out](http://bayareametro.github.com/tm2py) section of documentation for more details.
+
+### Example Data
+
+This repository doesn't come with example data due to its size. However, it does provide helper functions to access it from an online bucket:
+
+```bash
+get_test_data location/for/test/data
+```
+
+Alternatively, you can access it from [example_union_test_highway.zip](https://mtcdrive.box.com/s/3entr016e9teq2wt46x1os3fjqylfoge)
+
+See [starting out](http://bayareametro.github.com/tm2py) section of documentation for more details.
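+
+A minimal sketch of the gdal-first import order described above, mirroring what `bin/tm2py` does (assumes the Emme packages and tm2py are installed in the active environment):
+
+```python
+# importing gdal first loads the newer geos_c before the Emme API loads its own copy
+import gdal  # noqa: F401 -- the import itself is the workaround
+
+from tm2py import RunController  # Emme-dependent imports are now safe
+```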
+
+### Usage
+
+#### Python
+
+```python
+from tm2py import RunController
+
+controller = RunController(
+    ["scenario_config.toml", "model_config.toml"],
+    run_dir="UnionCity",
+)
+controller.run()
+```
+
+- `run_dir` specifies the run directory; otherwise the location of the first `config.toml` file is used.
+
+#### Terminal
+
+```sh
+\bin\tm2py -s examples\scenario_config.toml -m examples\model.toml [-r run_dir]
+```
+
+- `-s scenario.toml` file location with scenario-specific parameters
+- `-m model.toml` file location with general model parameters
+- `-r run_dir` specifies the run directory; otherwise the location of the first `config.toml` file is used.
+
+Additional functionality for various use cases can be found in [Examples](examples).
+
 ## Contributing
 
-Details can be found in [CONTRIBUTING]
\ No newline at end of file
+Details about contributing can be found on our documentation website: <https://bayareametro.github.io/tm2py/contributing>
diff --git a/bin/get_test_data.bat b/bin/get_test_data.bat
index 865a39ec..9e9f0465 100644
--- a/bin/get_test_data.bat
+++ b/bin/get_test_data.bat
@@ -17,4 +17,4 @@ ECHO "Writing to %CD%"
 
 curl -i -X GET %TEST_DATA_LOCATION% -L -o test_data.zip
 
-CD %CWD%
\ No newline at end of file
+CD %CWD%
diff --git a/bin/tm2py b/bin/tm2py
index 8b9aa39f..db2cf8d2 100644
--- a/bin/tm2py
+++ b/bin/tm2py
@@ -2,10 +2,15 @@
 
 import argparse
 
-from tm2py.controller import RunController
+# this is mysterious but appears to resolve DLL load errors that otherwise occur
+import gdal
+
+from tm2py import RunController
+
 
 def usage():
-    print("tm2py -s scenario.toml -m model.toml")
+    print("tm2py -s scenario.toml -m model.toml -r run_dir")
+
 
 def run():
     parser = argparse.ArgumentParser(description="Main: run MTC TM2PY")
@@ -14,10 +19,17 @@ def run():
         "-s", "--scenario", required=True, help=r"Scenario config file path"
     )
     parser.add_argument("-m", "--model", required=True, help=r"Model config file path")
+    parser.add_argument(
+        "-r",
+        "--run_dir",
+        required=False,
+        help=r"Model run directory; defaults to the root of the scenario config if not specified",
+    )
 
     args = parser.parse_args()
-    controller = RunController([args.scenario, args.model])
+    controller = RunController([args.scenario, args.model], args.run_dir)
     controller.run()
+
 
 if __name__ == "__main__":
-    run()
\ No newline at end of file
+    run()
diff --git a/bin/tm2py.bat b/bin/tm2py.bat
new file mode 100644
index 00000000..cf6c66cd
--- /dev/null
+++ b/bin/tm2py.bat
@@ -0,0 +1,2 @@
+REM Run adjacent tm2py python script, passing input arguments
+python "%~dp0\tm2py" %*
diff --git a/bin/update_docs b/bin/update_docs
index f81ce8eb..bb35aa12 100644
--- a/bin/update_docs
+++ b/bin/update_docs
@@ -5,11 +5,13 @@ import os
 import tm2py
 
 # High-level settings
-MODULE_CLASS_DOC_LIST = [
-    ("classes_components.md",[("## Components", tm2py.components, 1)]),
+MODULE_CLASS_DOC_LIST = [
+    ("classes_components.md", [("## Components", tm2py.components, 1)]),
     ("classes_basic.md", [("## Basic", tm2py, 1)]),
     ("classes_config.md", [("## Config", tm2py.config, 1)]),
-    ("classes_emme.md",[("## Emme", tm2py.emme, 3) ],
+    (
+        "classes_emme.md",
+        [("## Emme", tm2py.emme, 3)],
     ),
 ]
 
@@ -18,7 +20,7 @@ logger = logging.getLogger()
 logger.setLevel(logging.INFO)
 
 base_dir = os.path.dirname(os.path.dirname(__file__))
-docs_dir = os.path.join(base_dir,"docs")
+docs_dir = os.path.join(base_dir, "docs")
 logger.info(f"Using docs directory:\n {docs_dir}")
 
 # Update class diagrams (currently using defaults)
@@ -27,9 +29,11 @@ logger.info("Updating class diagrams")
 
 from tm2py.utils import doc_modules
 
-for _class_diagram_md,_module_list in MODULE_CLASS_DOC_LIST:
+for _class_diagram_md, _module_list in MODULE_CLASS_DOC_LIST:
     class_diagram_str = doc_modules.generate_md_class_diagram(_module_list)
-    class_diagram_outfile = os.path.join(docs_dir,"includes","class_diagrams",_class_diagram_md)
-    with open(class_diagram_outfile,'w') as f:
+    class_diagram_outfile = os.path.join(
+        docs_dir, "includes", "class_diagrams", _class_diagram_md
+    )
+    with open(class_diagram_outfile, "w") as f:
         f.write(class_diagram_str)
-    logger.info(f"Updated class diagrams in:\n{class_diagram_outfile}")
\ No newline at end of file
+    logger.info(f"Updated class diagrams in:\n{class_diagram_outfile}")
diff --git a/configs/model_config.toml b/configs/model_config.toml
new file mode 100644
index 00000000..13a8605b
--- /dev/null
+++ b/configs/model_config.toml
@@ -0,0 +1,1660 @@
+####################################
+#       MODEL CONFIGURATION        #
+####################################
+
+[[time_periods]]
+    name = "ea"
+    length_hours = 3
+    start_period = 1
+    highway_capacity_factor = 3
+    emme_scenario_id = 11
+    congested_transit_assn_max_iteration = 1
+[[time_periods]]
+    name = "am"
+    length_hours = 4
+    start_period = 4
+    highway_capacity_factor = 3.65
+    emme_scenario_id = 12
+    congested_transit_assn_max_iteration = 10
+[[time_periods]]
+    name = "md"
+    length_hours = 5
+    start_period = 12
+    highway_capacity_factor = 5
+    emme_scenario_id = 13
+    congested_transit_assn_max_iteration = 1
+[[time_periods]]
+    name = "pm"
+    length_hours = 4
+    start_period = 22
+    highway_capacity_factor = 3.65
+    emme_scenario_id = 14
+    congested_transit_assn_max_iteration = 10
+[[time_periods]]
+    name = "ev"
+    length_hours = 8
+    start_period = 30
+    highway_capacity_factor = 8
+    emme_scenario_id = 15
+    congested_transit_assn_max_iteration = 1
+
+[logging]
+    # Use default log configuration
+    log_on_error_file_path = "log_error.txt"
+    notify_slack = false
+    use_emme_logbook = true
+
+[household]
+    highway_demand_file = "demand_matrices/highway/household/TAZ_Demand_{period}_{iter}.omx"
+    # transit_demand_file = "demand_matrices/transit/TAP_Demand_{set}_{period}_{iter}.omx"
+    active_demand_file = "demand_matrices/active/nm_demand_{period}.omx"
+    transit_demand_file = "demand_matrices/transit/trn_demand_{period}.omx" #temp name format, add iteration?
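+    # A worked example of the file name templates above (hypothetical values):
+    # with {period} = "am" and {iter} = 1, highway_demand_file resolves to
+    #     demand_matrices/highway/household/TAZ_Demand_am_1.omx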
+    highway_taz_ctramp_output_file = "ctramp_output/{mode_agg}_{period}_{mode}_{period}.omx"
+    highway_maz_ctramp_output_file = "ctramp_output/auto_{period}_MAZ_AUTO_{number}_{period}.omx"
+    transit_tap_ctramp_output_file = "ctramp_output/{mode_agg}_{period}_{mode}_TRN_{set}_{period}.omx"
+    transit_taz_ctramp_output_file = "ctramp_output/{mode_agg}_{period}_{mode}_TRN_{period}.omx"
+    ctramp_indiv_trip_file = "ctramp_output/indivTripData_{iteration}.csv"
+    ctramp_joint_trip_file = "ctramp_output/jointTripData_{iteration}.csv"
+    ctramp_run_dir = 'ctramp_output'
+    OwnedAV_ZPV_factor = 0.7
+    TNC_ZPV_factor = 0.7
+    ctramp_hh_file = "ctramp_output/householdData_{iteration}.csv"
+
+    [[household.mode_agg]]
+        name = "auto"
+        modes = [
+            "SOV_GP",
+            "SOV_PAY",
+            "SR2_GP",
+            "SR2_HOV",
+            "SR2_PAY",
+            "SR3_GP",
+            "SR3_HOV",
+            "SR3_PAY",
+        ]
+    [[household.mode_agg]]
+        name = "nonmotor"
+        modes = ["BIKE", "WALK"]
+    [[household.mode_agg]]
+        name = "other"
+        modes = ["SCHLBUS", "TAXI", "TNC"]
+    [[household.mode_agg]]
+        name = "transit"
+        modes = ["KNRPRV", "KNRTNC", "PNR", "WLK"]
+    [household.rideshare_mode_split]
+        "taxi" = 0.08
+        "single_tnc" = 0.72
+        "shared_tnc" = 0.20
+    [household.taxi_split]
+        "da" = 0.00
+        "sr2" = 0.53
+        "sr3" = 0.47
+    [household.single_tnc_split]
+        "da" = 0.00
+        "sr2" = 0.53
+        "sr3" = 0.47
+    [household.shared_tnc_split]
+        "da" = 0.00
+        "sr2" = 0.18
+        "sr3" = 0.82
+    [household.ctramp_mode_names]
+        1 = 'sov_gp'
+        2 = 'sov_pay'
+        3 = 'sr2_gp'
+        4 = 'sr2_hov'
+        5 = 'sr2_pay'
+        6 = 'sr3_gp'
+        7 = 'sr3_hov'
+        8 = 'sr3_pay'
+        9 = 'walk'
+        10 = 'bike'
+        11 = 'wlk'
+        12 = 'pnr'
+        13 = 'knr'
+        14 = 'knr'
+        15 = 'taxi'
+        16 = 'tnc'
+        17 = 'schlbus'
+    [household.income_segment]
+        enabled = false
+        segment_suffixes = ["LowInc", "MedInc", "HighInc", "XHighInc"]
+        cutoffs = [0, 30000, 60000, 100000]
+
+[air_passenger]
+    output_trip_table_directory = "demand_matrices/highway/air_passenger"
+    outfile_trip_table_tmp = "tripsAirPax{period}.omx"
+    highway_demand_file = "demand_matrices/highway/air_passenger/tripsAirPax{period}.omx"
+    input_demand_folder = "inputs/nonres"
+    input_demand_filename_tmpl = "{year}_{direction}{airport}.csv"
+    reference_start_year = "2007"
+    reference_end_year = "2035"
+    airport_names = [ "SFO", "OAK", "SJC",]
+    [[air_passenger.demand_aggregation]]
+        name = "DA"
+        mode = "da"
+        access_modes = [ "ES", "PK", "RN", "TX", "LI",]
+    [[air_passenger.demand_aggregation]]
+        name = "S2"
+        mode = "sr2"
+        access_modes = [ "ES", "PK", "RN", "TX", "LI",]
+    [[air_passenger.demand_aggregation]]
+        name = "S3"
+        mode = "sr3"
+        access_modes = [ "ES", "PK", "RN", "TX", "LI", "VN", "HT", "CH",]
+
+[internal_external]
+    output_trip_table_directory = "demand_matrices/highway/internal_external"
+    outfile_trip_table_tmp = "tripsIx{period}.omx"
+    highway_demand_file = "demand_matrices/highway/internal_external/tripsIx{period}.omx"
+    modes = ["da","sr2","sr3"]
+    [internal_external.demand]
+        input_demand_file = "inputs/nonres/IXDaily2006x4.may2208.new.omx"
+        input_demand_matrixname_tmpl = "IX_Daily_{mode}"
+        reference_year = 2005
+        # Union city compatible test with 43 zones
+        [[internal_external.demand.annual_growth_rate]]
+            zone_index = [41, 42, 4688, 4689, 4690, 4691, 4692, 4704, 4705, 4706, 4707, 4708]
+            factor = 1.05
+            as_growth_rate = true
+        [[internal_external.demand.annual_growth_rate]]
+            zone_index = [4693, 4694, 4695, 4696, 4698, 4699, 4701, 4702, 4703]
+            factor = 1.010
+            as_growth_rate = true
+        [[internal_external.demand.annual_growth_rate]]
+            zone_index = [4697]
+            factor = 0
+        [[internal_external.demand.annual_growth_rate]]
+            zone_index = [4700]
+            factor = 1.015
+            as_growth_rate = true
+        [[internal_external.demand.special_gateway_adjust]]
+            zone_index = 4693
+            factor = 1.020228
+        [[internal_external.demand.special_gateway_adjust]]
+            zone_index = 4695
+            factor = 1.242555
+        [[internal_external.demand.special_gateway_adjust]]
+            zone_index = 4696
+            factor = 0.848518
+        [[internal_external.demand.special_gateway_adjust]]
+            zone_index = 4698
+            factor = 1.673817
+    [internal_external.time_of_day]
+        [[internal_external.time_of_day.classes]]
+            name = "internal_external"
+            [[internal_external.time_of_day.classes.time_period_split]]
+                time_period = "ea"
+                production = 0.15329
+                attraction = 0.06440
+            [[internal_external.time_of_day.classes.time_period_split]]
+                time_period = "am"
+                production = 0.26441
+                attraction = 0.17540
+            [[internal_external.time_of_day.classes.time_period_split]]
+                time_period = "md"
+                production = 0.25720
+                attraction = 0.26950
+            [[internal_external.time_of_day.classes.time_period_split]]
+                time_period = "pm"
+                production = 0.21490
+                attraction = 0.29824
+            [[internal_external.time_of_day.classes.time_period_split]]
+                time_period = "ev"
+                production = 0.11020
+                attraction = 0.19246
+    [internal_external.toll_choice]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        [[internal_external.toll_choice.classes]]
+            name = "da"
+            skim_mode = "da"
+            veh_group_name = "da"
+        [[internal_external.toll_choice.classes]]
+            name = "sr2"
+            skim_mode = "sr2"
+            veh_group_name = "sr2"
+            [[internal_external.toll_choice.classes.property_factors]]
+                property = "toll"
+                coeff = 0.5714285714285714
+        [[internal_external.toll_choice.classes]]
+            name = "sr3"
+            skim_mode = "sr3"
+            veh_group_name = "sr3"
+            [[internal_external.toll_choice.classes.property_factors]]
+                property = "toll"
+                coeff = 0.4
+        [[internal_external.toll_choice.utility]]
+            # The in-vehicle time coefficient is taken from the work trip mode choice model.
+            # coefficient for in-vehicle time = -0.0220/0.25 = -0.088
+            property = "time"
+            coeff = -0.088
+
+[truck]
+    output_trip_table_directory = "demand_matrices/highway/commercial"
+    outfile_trip_table_tmp = "tripstrk{period}.omx"
+    highway_demand_file = "demand_matrices/highway/commercial/tripstrk{period}.omx"
+    [[truck.classes]]
+        name = "vsmtrk"
+        description = "very small truck"
+    [[truck.classes]]
+        name = "smltrk"
+        description = "small truck"
+    [[truck.classes]]
+        name = "medtrk"
+        description = "medium truck"
+    [[truck.classes]]
+        name = "lrgtrk"
+        description = "large truck"
+    [[truck.impedances]]
+        name = "trk"
+        skim_mode = "trk"
+        [truck.impedances.time_blend]
+            "AM" = 0.3333333333
+            "MD" = 0.6666666667
+    [[truck.impedances]]
+        name = "lrgtrk"
+        skim_mode = "lrgtrk"
+        [truck.impedances.time_blend]
+            "AM" = 0.3333333333
+            "MD" = 0.6666666667
+    [truck.trip_dist]
+        k_factors_file = "inputs/nonres/truck_kfactors_taz.csv"
+        friction_factors_file = "inputs/nonres/truckFF.csv"
+        max_balance_iterations = 999
+        max_balance_relative_error = 0.0001
+        [[truck.trip_dist.classes]]
+            name = "vsmtrk"
+            impedance = "trk"
+            use_k_factors = false
+        [[truck.trip_dist.classes]]
+            name = "smltrk"
+            impedance = "trk"
+            use_k_factors = true
+        [[truck.trip_dist.classes]]
+            name = "medtrk"
+            impedance = "trk"
+            use_k_factors = true
+        [[truck.trip_dist.classes]]
+            name = "lrgtrk"
+            impedance = "lrgtrk"
+            use_k_factors = true
+    [[truck.trip_gen.classes]]
+        name = "vsmtrk"
+        purpose = "linked"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            constant = 0
+            multiplier = 0.96
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'RETEMPN'
+                coeff = 0.95409
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'FPSEMPN'
+                coeff = 0.54333
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'HEREMPN'
+                coeff = 0.50769
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'OTHEMPN'
+                coeff = 0.63558
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'AGREMPN'
+                coeff = 1.10181
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'MWTEMPN'
+                coeff = 0.81576
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'TOTHH'
+                coeff = 0.26565
+    [[truck.trip_gen.classes]]
+        name = "smltrk"
+        purpose = "linked"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            constant = 0
+            multiplier = 1
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'TOTEMP'
+                coeff = 0.0324
+    [[truck.trip_gen.classes]]
+        name = "smltrk"
+        purpose = "garage"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'RETEMPN'
+                coeff = 0.02146
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'FPSEMPN'
+                coeff = 0.02424
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'HEREMPN'
+                coeff = 0.01320
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'OTHEMPN'
+                coeff = 0.04325
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'AGREMPN'
+                coeff = 0.05021
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'MWTEMPN'
+                coeff = 0.01960
+        [truck.trip_gen.classes.attraction_formula]
+            [[truck.trip_gen.classes.attraction_formula.land_use_rates]]
+                property = 'TOTEMP'
+                coeff = 0.0234
+    [[truck.trip_gen.classes]]
+        name = "medtrk"
+        purpose = "linked"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            constant = 0
+            multiplier = 1
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'TOTEMP'
+                coeff = 0.0039
+    [[truck.trip_gen.classes]]
+        name = "medtrk"
+        purpose = "garage"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'RETEMPN'
+                coeff = 0.00102
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'FPSEMPN'
+                coeff = 0.00147
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'HEREMPN'
+                coeff = 0.00025
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'OTHEMPN'
+                coeff = 0.00331
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'AGREMPN'
+                coeff = 0.00445
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'MWTEMPN'
+                coeff = 0.00165
+        [truck.trip_gen.classes.attraction_formula]
+            [[truck.trip_gen.classes.attraction_formula.land_use_rates]]
+                property = 'TOTEMP'
+                coeff = 0.0046
+    [[truck.trip_gen.classes]]
+        name = "lrgtrk"
+        purpose = "linked"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            constant = 0
+            multiplier = 1
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'TOTEMP'
+                coeff = 0.0073
+    [[truck.trip_gen.classes]]
+        name = "lrgtrk"
+        purpose = "garage"
+        balance_to = "productions"
+        [truck.trip_gen.classes.production_formula]
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'RETEMPN'
+                coeff = 0.00183
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'FPSEMPN'
+                coeff = 0.00482
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'HEREMPN'
+                coeff = 0.00274
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'OTHEMPN'
+                coeff = 0.00795
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'AGREMPN'
+                coeff = 0.01125
+            [[truck.trip_gen.classes.production_formula.land_use_rates]]
+                property = 'MWTEMPN'
+                coeff = 0.00486
+        [truck.trip_gen.classes.attraction_formula]
+            [[truck.trip_gen.classes.attraction_formula.land_use_rates]]
+                property = 'TOTEMP'
+                coeff = 0.0136
+    [truck.time_of_day]
+        [[truck.time_of_day.classes]]
+            name = "vsmtrk"
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ea"
+                od = 0.0235
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "am"
+                od = 0.0700
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "md"
+                od = 0.6360
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "pm"
+                od = 0.1000
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ev"
+                od = 0.1705
+        [[truck.time_of_day.classes]]
+            name = "smltrk"
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ea"
+                od = 0.0765
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "am"
+                od = 0.2440
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "md"
+                od = 0.3710
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "pm"
+                od = 0.2180
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ev"
+                od = 0.0905
+        [[truck.time_of_day.classes]]
+            name = "medtrk"
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ea"
+                od = 0.0665
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "am"
+                od = 0.2930
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "md"
+                od = 0.3935
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "pm"
+                od = 0.1730
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ev"
+                od = 0.0740
+        [[truck.time_of_day.classes]]
+            name = "lrgtrk"
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ea"
+                od = 0.1430
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "am"
+                od = 0.2320
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "md"
+                od = 0.3315
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "pm"
+                od = 0.1750
+            [[truck.time_of_day.classes.time_period_split]]
+                time_period = "ev"
+                od = 0.1185
+    [truck.toll_choice]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        [[truck.toll_choice.classes]]
+            name = "vsmtrk"
+            skim_mode = "trk"
+            veh_group_name = "vsm"
+        [[truck.toll_choice.classes]]
+            name = "smltrk"
+            skim_mode = "trk"
+            veh_group_name = "sml"
+        [[truck.toll_choice.classes]]
+            name = "medtrk"
+            skim_mode = "trk"
+            veh_group_name = "med"
+        [[truck.toll_choice.classes]]
+            name = "lrgtrk"
+            skim_mode = "lrgtrk"
+            veh_group_name = "lrg"
+        [[truck.toll_choice.utility]]
+            property = "time"
+            coeff = -0.088
+
+[active_modes]
+    emme_scenario_id = 1
+    [[active_modes.shortest_path_skims]]
+        mode = "walk"
+        roots = "MAZ"
+        leaves = "MAZ"
+        max_dist_miles = 3
+        output = "skim_matrices/non_motorized/ped_distance_maz_maz.txt"
+    [[active_modes.shortest_path_skims]]
+        mode = "walk"
+        roots = "MAZ"
+        leaves = "TAP"
+        max_dist_miles = 0.5
+        output = "skim_matrices/non_motorized/ped_distance_maz_tap.txt"
+    [[active_modes.shortest_path_skims]]
+        mode = "bike"
+        roots = "MAZ"
+        leaves = "MAZ"
+        max_dist_miles = 3
+        output = "skim_matrices/non_motorized/bike_distance_maz_maz.txt"
+    [[active_modes.shortest_path_skims]]
+        mode = "bike"
+        roots = "MAZ"
+        leaves = "TAP"
+        max_dist_miles = 3
+        output = "skim_matrices/non_motorized/bike_distance_maz_tap.txt"
+    [[active_modes.shortest_path_skims]]
+        mode = "bike"
+        roots = "TAZ"
+        leaves = "TAZ"
+        output = "skim_matrices/non_motorized/bike_distance_taz_taz.txt"
+    [[active_modes.shortest_path_skims]]
+        mode = "walk"
+        roots = "TAP"
+        leaves = "TAP"
+        max_dist_miles = 0.5
+        output = "skim_matrices/non_motorized/ped_distance_tap_tap.txt"
+
+[highway]
+    drive_access_output_skim_path = "skim_matrices\\transit\\drive_access\\drive_maz_taz_tap.csv"
+    output_skim_path = "skim_matrices/highway"
+    output_skim_filename_tmpl = "HWYSKM{time_period}_taz.omx"
+    output_skim_matrixname_tmpl = "{time_period}_{mode}_{property}"
+    relative_gap = 0.0005
+    max_iterations = 100
+    # labels entire highway network (any of the classes) + MAZ connectors
+    generic_highway_mode_code = "c"
+    # include other MAZs to estimate density (pop+jobs*2.5)/acres for each MAZ
+    area_type_buffer_dist_miles = 0.5
+    # nodes at interchanges
+    interchange_nodes_file = "inputs/hwy/interchange_nodes.csv"
+    [highway.tolls]
+        file_path = "inputs/hwy/tolls.csv"
+        src_vehicle_group_names = ["da", "s2", "s3", "vsm", "sml", "med", "lrg"]
+        # the dst_vehicle_group_names is used in the class group suffix for the
+        # highway.classes toll attribute name and the skims name, "bridgetoll_{}"
+        # and "valuetoll_{}"
+        dst_vehicle_group_names = ["da", "sr2", "sr3", "vsm", "sml", "med", "lrg"]
+        # tollbooth separates links with "bridge" tolls (index < this value)
+        # (used in all classes) vs. "value" tolls (used in toll-available classes only)
+        valuetoll_start_tollbooth_code = 11
+    [highway.maz_to_maz]
+        mode_code = "x"
+        excluded_links = [ "is_toll_da", "is_sr",]
+        operating_cost_per_mile = 18.93
+        value_of_time = 17.23
+        output_skim_file = "skim_matrices/highway/HWYSKIM_MAZMAZ_DA.csv"
+        skim_period = "md"
+        max_distance = 10
+        max_skim_cost = 11.0
+        # based on ~= 5 miles @ 40 mph = 11
+        # = time + (0.6 / vot) * (dist * opcost)
+        # = 5 / 40 * 60 + (0.6 / 17.23) * (5 * 18.93)
+        demand_file = "demand_matrices/highway/maz_demand/auto_{period}_MAZ_AUTO_{number}_{period}_{iter}.omx"
+        [[highway.maz_to_maz.demand_county_groups]]
+            number = 1
+            counties = ["San Francisco", "San Mateo", "Santa Clara"]
+        [[highway.maz_to_maz.demand_county_groups]]
+            number = 2
+            counties = ["Alameda", "Contra Costa"]
+        [[highway.maz_to_maz.demand_county_groups]]
+            number = 3
+            counties = ["Solano", "Napa", "Sonoma", "Marin"]
+
+    [[highway.classes]]
+        name = "da"
+        veh_group_name = "da"
+        description = "drive alone"
+        mode_code = "d"
+        excluded_links = [ "is_toll_da", "is_sr",]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        toll = [ "@bridgetoll_da" ]
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_da", "rlbty", "autotime"]
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SOV_GP_{period}"
+        [[highway.classes.demand]]
+            source = "air_passenger"
+            name = "da"
+        [[highway.classes.demand]]
+            source = "internal_external"
+            name = "da"
+    [[highway.classes]]
+        name = "sr2"
+        veh_group_name = "sr2"
+        description = "shared ride 2"
+        mode_code = "e"
+        excluded_links = [ "is_toll_sr2", "is_sr3",]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        toll = [ "@bridgetoll_sr2" ]
+        toll_factor = 0.5714285714285714
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_sr2", "hovdist", "rlbty", "autotime"]
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SR2_GP_{period}"
+            factor = 0.5714285714285714
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SR2_HOV_{period}"
+            factor = 0.5714285714285714
+        [[highway.classes.demand]]
+            source = "air_passenger"
+            name = "sr2"
+        [[highway.classes.demand]]
+            source = "internal_external"
+            name = "sr2"
+    [[highway.classes]]
+        name = "sr3"
+        veh_group_name = "sr3"
+        description = "shared ride 3+"
+        mode_code = "f"
+        excluded_links = [ "is_toll_sr3",]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        toll = ["@bridgetoll_sr3"]
+        toll_factor = 0.4
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_sr3", "hovdist", "rlbty", "autotime"]
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SR3_GP_{period}"
+            factor = 0.4
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SR3_HOV_{period}"
+            factor = 0.4
+        [[highway.classes.demand]]
+            source = "air_passenger"
+            name = "sr3"
+        [[highway.classes.demand]]
+            source = "internal_external"
+            name = "sr3"
+    [[highway.classes]]
+        name = "trk"
+        veh_group_name = "trk"
+        description = "truck"
+        mode_code = "t"
+        excluded_links = [ "is_toll_vsm", "is_toll_sml", "is_toll_med", "is_sr",]
+        value_of_time = 37.87
+        operating_cost_per_mile = 31.28
+        toll = ["@bridgetoll_sml"]
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_vsm", "bridgetoll_sml", "bridgetoll_med",]
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "vsmtrk"
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "smltrk"
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "medtrk"
+    [[highway.classes]]
+        name = "lrgtrk"
+        veh_group_name = "lrgtrk"
+        description = "large truck"
+        mode_code = "l"
+        excluded_links = [ "is_toll_lrg", "is_auto_only",]
+        value_of_time = 37.87
+        operating_cost_per_mile = 31.28
+        toll = ["@bridgetoll_lrg"]
+        pce = 2.0
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_lrg",]
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "lrgtrk"
+            factor = 2.0
+    [[highway.classes]]
+        name = "datoll"
+        veh_group_name = "da"
+        description = "drive alone toll"
+        mode_code = "D"
+        excluded_links = [ "is_sr",]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        toll = [ "@valuetoll_da", "@bridgetoll_da" ]
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_da", "valuetoll_da", "tolldist", "rlbty", "autotime"]
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SOV_PAY_{period}"
+        [[highway.classes.demand]]
+            source = "internal_external"
+            name = "datoll"
+    [[highway.classes]]
+        name = "sr2toll"
+        veh_group_name = "sr2"
+        description = "shared ride 2 toll"
+        mode_code = "E"
+        excluded_links = [ "is_sr3",]
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        toll = [ "@valuetoll_sr2", "@bridgetoll_sr2" ]
+        toll_factor = 0.5714285714285714
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_sr2", "valuetoll_sr2", "hovdist", "tolldist", "rlbty", "autotime"]
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SR2_PAY_{period}"
+            factor = 0.5714285714285714
+        [[highway.classes.demand]]
+            source = "household"
+            name = "TAXI_{period}"
+            factor = 0.5714285714285714
+        [[highway.classes.demand]]
+            source = "household"
+            name = "TNC_{period}"
+            factor = 0.5714285714285714
+        [[highway.classes.demand]]
+            source = "internal_external"
+            name = "sr2toll"
+    [[highway.classes]]
+        name = "sr3toll"
+        veh_group_name = "sr3"
+        description = "shared ride 3+ toll"
+        mode_code = "F"
+        excluded_links = []
+        value_of_time = 18.93
+        operating_cost_per_mile = 17.23
+        toll = [ "@valuetoll_sr3", "@bridgetoll_sr3" ]
+        toll_factor = 0.4
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_sr3", "valuetoll_sr3", "hovdist", "tolldist", "rlbty", "autotime"]
+        [[highway.classes.demand]]
+            source = "household"
+            name = "SR3_PAY_{period}"
+            factor = 0.4
+        [[highway.classes.demand]]
+            source = "internal_external"
+            name = "sr3toll"
+    [[highway.classes]]
+        name = "trktoll"
+        veh_group_name = "trk"
+        description = "truck toll"
+        mode_code = "T"
+        excluded_links = [ "is_sr",]
+        value_of_time = 37.87
+        operating_cost_per_mile = 31.28
+        toll = [ "@valuetoll_sml", "@bridgetoll_sml" ]
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_vsm", "bridgetoll_sml", "bridgetoll_med", "valuetoll_vsm", "valuetoll_sml", "valuetoll_med",]
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "vsmtrktoll"
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "smltrktoll"
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "medtrktoll"
+    [[highway.classes]]
+        name = "lrgtrktoll"
+        veh_group_name = "lrgtrk"
+        description = "large truck toll"
+        mode_code = "L"
+        excluded_links = [ "is_auto_only", "is_sr",]
+        value_of_time = 37.87
+        operating_cost_per_mile = 31.28
+        pce = 2.0
+        toll = [ "@valuetoll_lrg", "@bridgetoll_lrg" ]
+        skims = [ "time", "dist", "freeflowtime", "bridgetoll_lrg", "valuetoll_lrg",]
+        [[highway.classes.demand]]
+            source = "truck"
+            name = "lrgtrktoll"
+            factor = 2.0
+
+[transit]
+    apply_msa_demand = false
+    # vot=16.2*(180.20/227.47) https://github.com/BayAreaMetro/modeling-website/wiki/InflationAssumptions
+    value_of_time = 12.8
+    walk_speed = 3.0
+    # default transit speed used to calculate transit time in case any link is missing time from the highway network
+    transit_speed = 30.0
+    effective_headway_source = "hdw"
+    initial_wait_perception_factor = 1.5
+    transfer_wait_perception_factor = 3.0
+    walk_perception_factor = 2.0
+    walk_perception_factor_cbd = 1.0
+    drive_perception_factor = 3.0
+    max_transfers = 3
+    # fare option
+    use_fares = false
+    fare_2015_to_2000_deflator = 0.698 #180.20/258.27
+    fares_path = "inputs/trn/fares.far"
+    fare_matrix_path = "inputs/trn/fareMatrix.txt"
+    # max expected transfer distance for mode-to-mode transfer fare table generation
+    fare_max_transfer_distance_miles = 3
+    # for TAZ instead of TAPs
+    ## set to true if using TAZ
+    override_connector_times = false
+    #input_connector_access_times_path = "inputs\\trn\\estimated_taz_access_connectors.csv"
+    #input_connector_egress_times_path = "inputs\\trn\\estimated_taz_egress_connectors.csv"
+    # capacitated transit assignment methods
+    use_ccr = false
+    ccr_stop_criteria.max_iterations = 3
+    ccr_stop_criteria.relative_difference = 0.01
+    ccr_stop_criteria.percent_segments_over_capacity = 0.01
+    ccr_weights.min_seat = 1.0
+    ccr_weights.max_seat = 1.4
+    ccr_weights.power_seat = 2.2
+    ccr_weights.min_stand = 1.4
+    ccr_weights.max_stand = 1.6
+    ccr_weights.power_stand = 3.4
+    eawt_weights.constant = 0.259625
+    # congested transit assignment methods
+    congested_transit_assignment = true
+    congested.trim_demand_before_congested_transit_assignment = true
+    congested.output_trimmed_demand_report_path = "output_summaries/trimmed_demand_{period}_{iteration}.csv"
+    congested.normalized_gap = 0.25
+    congested.relative_gap = 0.005
+    congested.use_peaking_factor = true
+    congested.am_peaking_factor = 1.219
+    congested.pm_peaking_factor = 1.262
+    congested_weights.min_seat = 1.0
+    congested_weights.max_seat = 1.4
+    congested_weights.power_seat = 2.2
+    congested_weights.min_stand = 1.4
+    congested_weights.max_stand = 1.6
+    congested_weights.power_stand = 3.4
+    # output skim and summary paths
+    # output_skim_path = "skim_matrices/transit/transit_skims_{period}.omx"
+    output_skim_path = "skim_matrices/transit"
+    output_skim_filename_tmpl = "trnskm{time_period}_{tclass}.omx"
+    output_skim_matrixname_tmpl = "{property}"
+    output_stop_usage_path = "output_summaries/stop_usage_{period}.csv"
+    output_transit_boardings_path = "output_summaries/boardings_by_line_{period}.csv"
+    output_transit_segment_path = "output_summaries/transit_segment_{period}.csv"
+    output_station_to_station_flow_path = "output_summaries/{operator}_station_to_station_{tclass}_{period}.txt"
+    output_transfer_at_station_path = "output_summaries/{tclass}_transfer_at_{stop}_{period}"
+    # timed transfer nodes
+    timed_transfer_nodes = [2625944, 2625943] # standard network #node_id of Bart stations: 19th street, MacArthur
+    [transit.output_transfer_at_station_node_ids]
+        "12th_street" = 2625945 # standard network #node_id
+        "19th street" = 2625944
+        "MacArthur" = 2625943
+
+    [[transit.classes]]
+        skim_set_id = "WLK_TRN_WLK"
+        name = "WLK_TRN_WLK"
+        description = "walk access and walk egress"
+        mode_types = ["ACCESS", "EGRESS", "WALK", "LOCAL", "PREMIUM"]
+        [[transit.classes.demand]]
+            source = "household"
+            name = "WLK_TRN_WLK"
+    [[transit.classes]]
+        skim_set_id = "PNR_TRN_WLK"
+        name = "PNR_TRN_WLK"
+        description = "PNR access and walk egress"
+        mode_types = ["DRIVE", "PNR_dummy", "WALK", "EGRESS", "LOCAL", "PREMIUM"]
+        [[transit.classes.demand]]
+            source = "household"
+            name = "PNR_TRN_WLK"
+    [[transit.classes]]
+        skim_set_id = "WLK_TRN_PNR"
+        name = "WLK_TRN_PNR"
+        description = "walk access and PNR egress"
+        mode_types = ["ACCESS", "WALK", "DRIVE", "PNR_dummy", "LOCAL", "PREMIUM"]
+        [[transit.classes.demand]]
+            source = "household"
+            name = "WLK_TRN_PNR"
+    [[transit.classes]]
+        skim_set_id = "KNR_TRN_WLK"
+        name = "KNR_TRN_WLK"
+        description = "KNR access and walk egress"
+        mode_types = ["DRIVE", "KNR_dummy", "WALK", "EGRESS", "LOCAL", "PREMIUM"]
+        [[transit.classes.demand]]
+            source = "household"
+            name = "KNR_TRN_WLK"
+    [[transit.classes]]
+        skim_set_id = "WLK_TRN_KNR"
+        name = "WLK_TRN_KNR"
+        description = "walk access and KNR egress"
+        mode_types = ["ACCESS", "WALK", "DRIVE", "KNR_dummy", "LOCAL", "PREMIUM"]
+        [[transit.classes.demand]]
+            source = "household"
+            name = "WLK_TRN_KNR"
+
+[emme]
+    #num_processors_highway = "MAX-1"
+    #num_processors_transit = "16"
+    num_processors = "MAX-1"
+
+    all_day_scenario_id = 1
+    project_path = "emme_project/mtc_emme.emp"
+    highway_database_path = "emme_project/Database_highway/emmebank"
+    active_north_database_path = "emme_project/Database_active_north/emmebank"
+    active_south_database_path = "emme_project/Database_active_south/emmebank"
+    transit_database_path = "emme_project/Database_transit/emmebank"
+
+[[highway.capclass_lookup]]
+    capclass = 0
+    capacity = 0
+    free_flow_speed = 0
+    critical_speed = 0
+#freeway
+[[highway.capclass_lookup]]
+    capclass = 1
+    capacity = 2050
+    free_flow_speed = 55
+    critical_speed = 25.898
+#
+[[highway.capclass_lookup]]
+    capclass = 2
+    capacity = 1450
+    free_flow_speed = 40
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 3
+    capacity = 1450
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 4
+    capacity = 900
+    free_flow_speed = 20
+    critical_speed = 7.063
+
+[[highway.capclass_lookup]]
+    capclass = 5
+    capacity = 900
+    free_flow_speed = 20
+    critical_speed = 7.063
+
+[[highway.capclass_lookup]]
+    capclass = 6
+    capacity = 600
+    free_flow_speed = 10
+    critical_speed = 4.709
+
+[[highway.capclass_lookup]]
+    capclass = 7
+    capacity = 600
+    free_flow_speed = 10
+    critical_speed = 4.709
+
+[[highway.capclass_lookup]]
+    capclass = 8
+    capacity = 2050
+    free_flow_speed = 18
+    critical_speed = 47.087
+
+[[highway.capclass_lookup]]
+    capclass = 11
+    capacity = 2050
+    free_flow_speed = 55
+    critical_speed = 25.898
+
+[[highway.capclass_lookup]]
+    capclass = 12
+    capacity = 1450
+    free_flow_speed = 40
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 13
+    capacity = 1500
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 14
+    capacity = 950
+    free_flow_speed = 25
+    critical_speed = 9.417
+
+[[highway.capclass_lookup]]
+    capclass = 15
+    capacity = 950
+    free_flow_speed = 25
+    critical_speed = 9.417
+
+[[highway.capclass_lookup]]
+    capclass = 16
+    capacity = 650
+    free_flow_speed = 15
+    critical_speed = 4.709
+
+[[highway.capclass_lookup]]
+    capclass = 17
+    capacity = 650
+    free_flow_speed = 15
+    critical_speed = 4.709
+
+[[highway.capclass_lookup]]
+    capclass = 18
+    capacity = 2050
+    free_flow_speed = 18
+    critical_speed = 47.087
+
+[[highway.capclass_lookup]]
+    capclass = 21
+    capacity = 2100
+    free_flow_speed = 60
+    critical_speed = 28.252
+
+[[highway.capclass_lookup]]
+    capclass = 22
+    capacity = 1600
+    free_flow_speed = 45
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 23
+    capacity = 1550
+    free_flow_speed = 35
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 24
+    capacity = 1000
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 25
+    capacity = 1000
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 26
+    capacity = 700
+    free_flow_speed = 20
+    critical_speed = 7.063
+
+[[highway.capclass_lookup]]
+    capclass = 27
+    capacity = 700
+    free_flow_speed = 20
+    critical_speed = 7.063
+
+[[highway.capclass_lookup]]
+    capclass = 28
+    capacity = 2100
+    free_flow_speed = 18
+    critical_speed = 47.087
+
+[[highway.capclass_lookup]]
+    capclass = 31
+    capacity = 2100
+    free_flow_speed = 60
+    critical_speed = 28.252
+
+[[highway.capclass_lookup]]
+    capclass = 32
+    capacity = 1600
+    free_flow_speed = 45
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 33
+    capacity = 1550
+    free_flow_speed = 35
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 34
+    capacity = 1000
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 35
+    capacity = 1000
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 36
+    capacity = 700
+    free_flow_speed = 25
+    critical_speed = 9.417
+
+[[highway.capclass_lookup]]
+    capclass = 37
+    capacity = 700
+    free_flow_speed = 25
+    critical_speed = 9.417
+
+[[highway.capclass_lookup]]
+    capclass = 38
+    capacity = 2100
+    free_flow_speed = 18
+    critical_speed = 47.087
+
+[[highway.capclass_lookup]]
+    capclass = 41
+    capacity = 2150
+    free_flow_speed = 65
+    critical_speed = 30.607
+
+[[highway.capclass_lookup]]
+    capclass = 42
+    capacity = 1650
+    free_flow_speed = 50
+    critical_speed = 16.480
+
+[[highway.capclass_lookup]]
+    capclass = 43
+    capacity = 1550
+    free_flow_speed = 40
+    critical_speed = 16.480
+
+[[highway.capclass_lookup]]
+    capclass = 44
+    capacity = 1050
+    free_flow_speed = 35
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 45
+    capacity = 1050
+    free_flow_speed = 35
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 46
+    capacity = 900
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 47
+    capacity = 900
+    free_flow_speed = 30
+    critical_speed = 11.772
+
+[[highway.capclass_lookup]]
+    capclass = 48
+    capacity = 2150
+    free_flow_speed = 18
+    critical_speed = 47.087
+
+[[highway.capclass_lookup]]
+    capclass = 51
+    capacity = 2150
+    free_flow_speed = 65
+    critical_speed = 30.607
+
+[[highway.capclass_lookup]]
+    capclass = 52
+    capacity = 1650
+    free_flow_speed = 55
+    critical_speed = 16.480
+
+[[highway.capclass_lookup]]
+    capclass = 53
+    capacity = 1550
+    free_flow_speed = 40
+    critical_speed = 16.480
+
+[[highway.capclass_lookup]]
+    capclass = 54
+    capacity = 1050
+    free_flow_speed = 40
+    critical_speed = 16.480
+
+[[highway.capclass_lookup]]
+    capclass = 55
+    capacity = 1050
+    free_flow_speed = 40
+    critical_speed = 16.480
+
+[[highway.capclass_lookup]]
+    capclass = 56
+    capacity = 950
+    free_flow_speed = 35
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 57
+    capacity = 950
+    free_flow_speed = 35
+    critical_speed = 14.126
+
+[[highway.capclass_lookup]]
+    capclass = 58
+    capacity = 2150
+    free_flow_speed = 18
+    critical_speed = 47.087
+
+[[transit.modes]]
+    mode_id = "D"
+    name = "drive_acc"
+    description = "drive_acc"
+    type = "DRIVE"
+    assign_type = "AUX_TRANSIT"
+    speed_or_time_factor = "ul1*1"
+[[transit.modes]]
+    mode_id = "k"
+    name = "knrdummy"
+    description = "knrdummy"
+    type = "KNR_dummy"
+    assign_type = "AUX_TRANSIT"
+    speed_or_time_factor = 40.0
+[[transit.modes]]
+    mode_id = "w"
+    name = "walk"
+    description = "walk"
+    type = "WALK"
+    assign_type = "AUX_TRANSIT"
+    speed_or_time_factor = "ul2*1"
+[[transit.modes]]
+    mode_id = "a"
+    name = "access"
+    description = "access"
+    type = "ACCESS"
+    assign_type = "AUX_TRANSIT"
+    speed_or_time_factor = "ul2*1"
+[[transit.modes]]
+    mode_id = "e"
+    name = "egress"
+    description = "egress"
+    type = "EGRESS"
+    assign_type = "AUX_TRANSIT"
+    speed_or_time_factor = "ul2*1"
+[[transit.modes]]
+    mode_id = "p"
+    name = "pnrdummy"
+    description = "pnrdummy"
+    type = "PNR_dummy"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 1.0
+    initial_boarding_penalty = 2.0
+    transfer_boarding_penalty = 2.0
+    headway_fraction = 0.5
+    transfer_wait_perception_factor = 3
+[[transit.modes]]
+    mode_id = "b"
+    description = "local_bus"
+    name = "LOC"
+    type = "LOCAL"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 1.0
+    initial_boarding_penalty = 10.0
+    transfer_boarding_penalty = 10.0
+    headway_fraction = 0.5
+    transfer_wait_perception_factor = 3
+[[transit.modes]]
+    mode_id = "x"
+    description = "exp_bus"
+    name = "EXP"
+    type = "PREMIUM"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 1.0
+    initial_boarding_penalty = 10.0
+    transfer_boarding_penalty = 10.0
+    headway_fraction = 0.5
+    transfer_wait_perception_factor = 3
+    eawt_factor = 0.4
+[[transit.modes]]
+    mode_id = "f"
+    description = "ferry"
+    name = "FRY"
+    type = "PREMIUM"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 0.8
+    initial_boarding_penalty = 2.0
+    transfer_boarding_penalty = 2.0
+    headway_fraction = 0.1
+    transfer_wait_perception_factor = 0.1
+    eawt_factor = 0.2
+[[transit.modes]]
+    mode_id = "l"
+    description = "light_rail"
+    name = "LTR"
+    type = "PREMIUM"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 0.9
+    initial_boarding_penalty = 7.0
+    transfer_boarding_penalty = 7.0
+    headway_fraction = 0.5
+    transfer_wait_perception_factor = 3
+    eawt_factor = 0.4
+[[transit.modes]]
+    mode_id = "h"
+    description = "heavy_rail"
+    name = "HVY"
+    type = "PREMIUM"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 0.7
+    initial_boarding_penalty = 5.0
+    transfer_boarding_penalty = 5.0
+    headway_fraction = 0.5
+    transfer_wait_perception_factor = 3
+    eawt_factor = 0.2
+[[transit.modes]]
+    mode_id = "r"
+    description = "comm_rail"
+    name = "COM"
+    type = "PREMIUM"
+    assign_type = "TRANSIT"
+    in_vehicle_perception_factor = 0.7
+    initial_boarding_penalty = 5.0
+    transfer_boarding_penalty = 5.0
+    headway_fraction = 0.3
+    transfer_wait_perception_factor = 3
+
+# [[transit.vehicles]]
+#     vehicle_id = 12
+#     mode = "b"
+#     name = ""
+#     auto_equivalent = 2.0
+#     seated_capacity = 1
+#     total_capacity = 2
+# [[transit.vehicles]]
+#     vehicle_id = 14
+#     mode = "b"
+#     name = ""
+#     auto_equivalent = 2.0
+#     seated_capacity = 1
+#     total_capacity = 2
+# [[transit.vehicles]]
+#     vehicle_id = 13
+#     mode = "b"
+#     name = ""
+#     auto_equivalent = 2.0
+#     seated_capacity = 1
+#     total_capacity = 2
+# [[transit.vehicles]]
+#     vehicle_id = 16
+#     mode = "b"
+#     name = ""
+#     auto_equivalent = 2.0
+#     seated_capacity = 1
+#     total_capacity = 2
+# [[transit.vehicles]]
+#     vehicle_id = 17
+#     mode = "b"
+#     name = ""
+#     auto_equivalent = 2.0
+#     seated_capacity = 1
+#     total_capacity = 2
+# [[transit.vehicles]]
+#     vehicle_id = 20
+#     mode = "b"
+#     name = ""
+#     auto_equivalent = 2.0
+#     seated_capacity = 1
+#     total_capacity = 2
+# [[transit.vehicles]]
+#     vehicle_id = 21
+#     mode =
"b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 24 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 28 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 30 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 38 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 42 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 44 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 46 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 49 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 52 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 56 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 60 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 63 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 66 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 68 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 70 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 71 +# mode = "b" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 80 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 81 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 84 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 86 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 87 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 90 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 91 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 92 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 94 +# mode = "x" +# name = "" +# auto_equivalent = 2.0 +# 
seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 101 +# mode = "f" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 103 +# mode = "f" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 110 +# mode = "l" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 111 +# mode = "l" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 120 +# mode = "h" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 130 +# mode = "r" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 131 +# mode = "r" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 +# [[transit.vehicles]] +# vehicle_id = 133 +# mode = "r" +# name = "" +# auto_equivalent = 0.0 +# seated_capacity = 1 +# total_capacity = 2 \ No newline at end of file diff --git a/configs/scenario_config.toml b/configs/scenario_config.toml new file mode 100644 index 00000000..fa113045 --- /dev/null +++ b/configs/scenario_config.toml @@ -0,0 +1,48 @@ +#################################### +# SCENARIO CONFIGURATION # +#################################### + +[scenario] + name = "UnionCity" + year = 2015 + verify = false + maz_landuse_file = "inputs/landuse/maz_data.csv" + zone_seq_file = "inputs/landuse/mtc_final_network_zone_seq.csv" + landuse_file = "inputs/landuse/maz_data_withDensity.csv" + landuse_index_column = "TAZ" +[run] + start_component = "" + initial_components = [ + #"create_tod_scenarios", + #"active_modes", + #"air_passenger", + #"prepare_network_highway", + #"highway", + #"highway_maz_skim", + #"prepare_network_transit", + #"drive_access_skims", + #"transit_assign", + #"transit_skim" + ] + global_iteration_components = [ + "household", + #"internal_external", + #"truck", + #"highway_maz_assign", + #"highway", + #"drive_access_skims", + #"transit_assign", + #"transit_skim", + ] + final_components = [] + start_iteration = 0 + end_iteration = 1 + + [run.warmstart] + warmstart = true + warmstart_check = true + household_highway_demand_file = "warmstart/demand_matrices/highway/household/TAZ_Demand_{period}.omx" + household_transit_demand_file = "warmstart/demand_matrices/transit/trn_demand_{period}.omx" + air_passenger_highway_demand_file = "warmstart/demand_matrices/highway/air_passenger/2015_tripsAirPax{period}.omx" + internal_external_highway_demand_file = "warmstart/demand_matrices/highway/internal_external/tripsIx{period}.omx" + truck_highway_demand_file = "warmstart/demand_matrices/highway/commercial/tripstrk{period}.omx" \ No newline at end of file diff --git a/configs/version/model_config_v0_0_0.toml b/configs/version/model_config_v0_0_0.toml index d0f86fad..e4d644cd 100644 --- a/configs/version/model_config_v0_0_0.toml +++ b/configs/version/model_config_v0_0_0.toml @@ -2,7 +2,7 @@ # MODEL CONFIGURATION # #################################### -version = 0.0.0 +version = '0.0.0' [dir] skims = "skims" @@ -70,14 +70,14 @@ type = "SOLA_TRAFFIC_ASSIGNMENT" [highway.assignment.stopping_criteria] relative_gap = 0.0005 -best_relative_gap: 0.0, +best_relative_gap = 0.0 max_iterations = 30 -"normalized_gap": 0.0, +"normalized_gap" = 0.0 
[highway.assignment.background_traffic] -"link_component": "ul1", -"turn_component": None, -"add_transit_vehicles": False, +"link_component" = "ul1" +"turn_component" = "" # Null is not valid in TOML +"add_transit_vehicles" = false # TODO document what this is tollbooth_start_index = 11 @@ -208,42 +208,42 @@ average_occupancy = 1.0 name = "shared ride 2 general purpose lanes" file = ["household","TAZ_Demand_{time_period}.omx"] matrix = "SR2_GP_{time_period}" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 1.75 [[highway.demand]] name = "shared ride 2 HOV lanes" file = ["household","TAZ_Demand_{time_period}.omx"] matrix = "SR2_HOV_{time_period}" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 1.75 [[highway.demand]] name = "shared ride 2 Toll Paying" file = ["household","TAZ_Demand_{time_period}.omx"] matrix = "SR2_PAY_{period}" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 1.75 [[highway.demand]] name = "shared ride 3 general HOV lanes" file = ["household","TAZ_Demand_{time_period}.omx"] matrix = "SR3_HOV_{period}" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 2.5 [[highway.demand]] name = "shared ride 3 general purpose lanes" file = ["household","TAZ_Demand_{time_period}.omx"] matrix = "SR3_GP_{period}" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 2.5 [[highway.demand]] name = "shared ride 3 toll-paying" file = ["household","TAZ_Demand_{time_period}.omx"] matrix = "SR3_PAY_{period}" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 2.5 ###### AIR PAX ###### @@ -259,29 +259,29 @@ average_occupancy = 1.0 name = "air passenger shared ride 2" file = ["air_passenger","tripsAirPax{period}.omx"] matrix = "SR2" -highway_class = -average_occupancy = 1.75 +highway_class = "" # Empty values not allowed +average_occupancy = 1.75 [[highway.demand]] name = "air passenger shared ride 2 toll-paying" file = ["air_passenger","tripsAirPax{period}.omx"] matrix = "SR2TOLL" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 1.75 [[highway.demand]] name = "air passenger shared ride 3+" file = ["air_passenger","tripsAirPax{period}.omx"] matrix = "SR3" -highway_class = +highway_class = "" # Empty values not allowed average_occupancy = 2.5 [[highway.demand]] -name = name = "air passenger shared ride 3+ toll-paying" +name = "air passenger shared ride 3+ toll-paying" file = ["air_passenger","tripsAirPax{period}.omx"] matrix = "SR3TOLL" -highway_class = -average_occupancy = 2.5 +highway_class = "" # Empty values not allowed +average_occupancy = 2.5 ###### INTERNAL EXTERNAL ###### @@ -298,28 +298,28 @@ name = "internal external shared ride 2" file = ["internal_external","tripsIx{time_period}.omx"] matrix = "SR2" highway_class = "shared ride 2" -average_occupancy = +average_occupancy = "" # Empty values not allowed [[highway.demand]] name = "internal external shared ride 2 toll-paying" file = ["internal_external","tripsIx{time_period}.omx"] matrix = "SR2TOLL" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed [[highway.demand]] name = "internal external shared ride 3+" file = ["internal_external","tripsIx{time_period}.omx"] matrix = "SR3" highway_class = "shared ride 3+" -average_occupancy = +average_occupancy = "" # Empty values not allowed [[highway.demand]] name = 
"internal external shared ride 3+ toll-paying" file = ["internal_external","tripsIx{time_period}.omx"] matrix = "SR3TOLL" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed ###### COMMERCIAL VEHICLES ###### @@ -327,64 +327,64 @@ average_occupancy = name = "commercial vehicles" file = ["commercial","tripstrk{time_period}.omx"] matrix = "CTRUCK" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "commercial vehicles toll-paying" file = ["commercial","tripstrk{time_period}.omx"] matrix = "CTRUCKTOLL" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "very small trucks" file = ["commercial","tripstrk{time_period}.omx"] matrix = "VSTRUCK" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "very small trucks toll-paying" file = ["commercial","tripstrk{time_period}.omx"] matrix = "VSTRUCKTOLL" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "small trucks" file = ["commercial","tripstrk{time_period}.omx"] matrix = "STRUCK" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "small trucks toll-paying" file = ["commercial","tripstrk{time_period}.omx"] -name = "STRUCKTOLL" -highway_class = -average_occupancy = +matrix = "STRUCKTOLL" +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "medium trucks" file = ["commercial","tripstrk{time_period}.omx"] matrix = "MTRUCK" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 [[highway.demand]] name = "medium trucks toll-paying" file = ["commercial","tripstrk{time_period}.omx"] matrix = "MTRUCKTOLL" -highway_class = -average_occupancy = +highway_class = "" # Empty values not allowed +average_occupancy = "" # Empty values not allowed pce = 2.0 @@ -446,5 +446,3 @@ type = "walk" [[aux_mode]] code = "e" type = "walk" - - diff --git a/dev-requirements.txt b/dev-requirements.txt index 102092a2..31fce333 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,5 +1,5 @@ -black +black >= 22.3.0 flake8 pre-commit pytest -recommonmark \ No newline at end of file +recommonmark diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..81716a3d --- /dev/null +++ b/docs/README.md @@ -0,0 +1,268 @@ +A python package to run the San Francisco Bay Area's Travel Model. + +**Owner:** Metropolitan Transportation Commission (MTC) + +## Starting Out + +### Prepare Virtual Environment + +If you are managing multiple python versions, we suggest using [`virtualenv`](https://virtualenv.pypa.io/en/latest/) or [`conda`](https://conda.io/en/latest/) virtual environments. + +The following instructions create and activate a conda environment (recommended) in which you can install: + +```sh +conda env create -f environment.yml +conda activate tm2py +``` + +!!! 
Tip "pywin32 must be installed by Emme" + + Installing the Emme package will also install the `pywin32` python package. + If `pywin32` gets installed by other means (like conda or pip), then you will get DLL load errors when trying to import the emme packages. To remedy, we recommend checking for `pywin32` before hand and uninstalling `pywin32` before installing the emme packages. + +To see if `pywin32` is installed in your environment, run: + +```sh +conda list +``` + +=== "`pywin32` is listed as being intalled by conda" + + Run: + + ```sh + conda uninstall pywin32 + ``` + +=== "`pywin32` is listed as bieng installed by pip" + + Run: + + ```sh + pip uninstall pywin32 + ``` + +### Install Emme + +You will need to install Emme's python packages into this conda environment **first**. + +!!! Tip + + The `tm2py` library works on top of [Inro's Emme Software](https://www.inrosoftware.com/en/products/emme/). (Almost) all of the functionality of this library requires a valid Emme installation and license. + +=== "Using GUI" + + From an INRO community forum post: + + 1. In the Emme Desktop application, open Tools->Application Options->Modeller + 2. Change your Python path as desired + 3. Click the "Install Modeller Package" button + +=== "Terminal" + + Alternatively, you can copy the `emme.pth` from the Emme install to the site packages: + + ```bash + cd tm2py + REM ... or from the root of the venv / conda environment + copy %EMMEPATH%\\emme.pth Lib\\site-packages\ + ``` + +Installation is successful if the command: + +```sh +conda list +``` + +Returns a list containing: + +```sh +inro-dynameq +inro-emme +inro-emme-agent +inro-emme-engine +inro-modeller +``` + +### Install TM2PY + +After successfully installing the Emme environment, you are ready to install tm2py. + +=== "Basic Install" + + ```bash + pip install tm2py + ``` + +=== "Bleeding Edge" + + If you want to install a more up-to-date or development version, you can do so by installing it from the `develop` branch as follows: + + ```bash + conda env create -f environment.yml + conda activate tm2py + pip install git+https://github.com/bayareametro/tm2py@develop + ``` + !!! Notes + + If you wanted to install from a specific tag/version number or branch, replace `@develop` with `@` or `@tag` + +=== "From clone" + + If you are going to be working on Lasso locally, you might want to clone it to your local machine and install it from the clone. The -e will install it in [editable mode](https://pip.pypa.io/en/stable/reference/pip_install/?highlight=editable#editable-installs). + + ```bash + conda env create -f environment.yml + conda activate tm2py + git clone https://github.com/bayareametro/tm2py + cd tm2py + pip install -e . + ``` + + !!! Notes + + The -e installs it in editable mode which captures changes in the repository as you switch branches. + +!!! tip "Developer Tip" + If you are going to be doing development, we recommend following the installations instructions in the [contributing](contributing.md/development.md) section. + +### Example Data + +!!! warning "Before you run the model" + + Due to size limitations, the bulk of the input data is not stored with the `tm2py` library, but can be accessed following the directions below. 
+ +| **Example** | **Location** | +| ---- | ----- | +| Union City | [example_union_test_highway.zip](https://mtcdrive.box.com/s/3entr016e9teq2wt46x1os3fjqylfoge) | + +Additionally, `tm2py` has a helper function to download the data using the following syntax: + +=== "python" + + ```python + import tm2py + tm2py.get_test_data("UnionCity") + ``` + +=== "terminal" + + ```sh + get_test_data location/for/test/data + ``` + +### Typical Usage + +The following is the typical usage to run the model. Other than the run directory, all the parameters for the model run are specified in [ any number of ] `toml` files, as documented in the configuration [documentation](api.md#configuration) and [example](examples/configuration.md). + +=== "python" + + ```python + import tm2py + controller = tm2py.RunController( + ["scenario_config.toml", "model_config.toml"], + run_dir="UnionCity", + ) + controller.run() + ``` + - `run_dir` specifies the run directory. If not specified, the location of the first config file is used. + +=== "terminal" + + ```sh + tm2py -s scenario.toml -m model.toml -r run_dir + ``` + + - `-s scenario.toml` file location with scenario-specific parameters + - `-m model.toml` file location with general model parameters + - `-r run_dir` specifies the run directory; otherwise the location of the first config file is used. + +Additional functionality for various use cases can be found in [Examples](examples). + +## What happens when you run the model + +Setting up a model run reads the settings and queues the components for each iteration. + +```python +my_controller = tm2py.RunController(config_files, run_dir) +``` + +```mermaid +flowchart TD + RunController[["Initiate the controller object\nRunController( config_file, run_dir )\nwhich does the following."]] + LoadConfig("Load Configuration\ncontroller.config=Configuration.load_toml()") + Logger("Initiate Logger\ncontroller.logger=Logger()") + queue_inputs("Queue Components\ncontroller.queue_inputs()") + RunController-->LoadConfig + LoadConfig-->Logger-->queue_inputs +``` + +`RunController.queue_inputs()` + +```mermaid +flowchart LR + queue_inputs[["queue_inputs()"]] + + InitialComponents("INITIAL COMPONENTS\nconfig.run.initial_components()") + Iterations("GLOBAL ITERATIONS\nFor each iteration from config.run.start_iteration to config.run.end_iteration") + PerIterComponents("COMPONENTS PER ITERATION\nFor each iteration config.run.global_iteration_components") + IterComponents("COMPONENTS AND ITERATIONS") + FinalComponents("FINAL COMPONENTS\nconfig.run.final_components") + Queue("RunController._queued_components") + + queue_inputs-->Iterations + queue_inputs-->PerIterComponents + PerIterComponents-->IterComponents + Iterations-->IterComponents + queue_inputs-->InitialComponents + InitialComponents-->Queue + IterComponents-->Queue + queue_inputs-->FinalComponents + FinalComponents-->Queue +``` + +Example model run configuration file with components in the order they are to be run: + +```toml +[run] + start_component = "" + initial_components = [ + "create_tod_scenarios", + "active_modes", + "air_passenger", + "prepare_network_highway", + "highway", + "highway_maz_skim", + "prepare_network_transit", + "transit_assign", + "transit_skim" + ] + global_iteration_components = [ + "household", + "internal_external", + "truck", + "highway_maz_assign", + "highway", + "prepare_network_transit", + "transit_assign", + "transit_skim" + ] + final_components = [] + start_iteration = 0 + end_iteration = 1 + +``` + +Running the model simply iterates through the queued components. 
+ +```python +my_run = my_controller.run() +``` + +```mermaid +flowchart TD + controller_run[["controller.run()"]] + validate_inputs("controller.validate_inputs()") + component_run[["For each item in controller._queued_components\ncomponent.run()"]] + + controller_run-->validate_inputs + validate_inputs-->component_run +``` diff --git a/docs/api.md b/docs/api.md index 336309b4..b92b8127 100644 --- a/docs/api.md +++ b/docs/api.md @@ -4,6 +4,12 @@ ::: tm2py.controller +## Configuration + +::: tm2py.config + +**Configurations for each component are listed with those components** + ## Components ### Base Component @@ -12,15 +18,62 @@ ### Demand Components -::: tm2py.components.demand +::: tm2py.components.demand.prepare_demand + +#### Household Demand + +Personal travel demand generated by household members. + +::: tm2py.components.demand.household +::: tm2py.config.HouseholdConfig + +#### Air Passenger Demand + +::: tm2py.components.demand.air_passenger +::: tm2py.config.AirPassengerDemandAggregationConfig + +#### Commercial Demand + +::: tm2py.components.demand.commercial +::: tm2py.config.TruckConfig + +#### Inter-regional Demand + +::: tm2py.components.demand.internal_external +::: tm2py.config.InternalExternalConfig + +#### Visitor Demand + +::: tm2py.components.demand.visitor + +### Highway Network Components + +::: tm2py.components.network.highway.highway_network + +::: tm2py.components.network.highway.highway_assign + +::: tm2py.config.HighwayConfig +::: tm2py.config.HighwayClassConfig +::: tm2py.config.HighwayTollsConfig +::: tm2py.config.DemandCountyGroupConfig +::: tm2py.components.network.highway.highway_maz +::: tm2py.config.HighwayMazToMazConfig + +### Transit Network Components + +::: tm2py.components.network.transit.transit_assign +::: tm2py.components.network.transit.transit_skim +::: tm2py.config.TransitModeConfig +::: tm2py.config.TransitConfig -### Network Components +### Active Network Components -::: tm2py.components.network +To come. ## Emme Wrappers ::: tm2py.emme +::: tm2py.config.EmmeConfig ## Errata diff --git a/docs/architecture.md b/docs/architecture.md index e5e733d9..a6d76583 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -13,7 +13,9 @@ classDiagram +write_top_sheet() } ``` + ## Controllers + ``` mermaid classDiagram Controller <|-- ModelController diff --git a/docs/contributing/development.md b/docs/contributing/development.md new file mode 100644 index 00000000..12ef7a06 --- /dev/null +++ b/docs/contributing/development.md @@ -0,0 +1,325 @@ +# Development + +## Preparation + +### Install Git + +If you don't have it installed already, you will need to install the [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) version-control system. + +=== "LINUX" + + ```sh + sudo apt install git-all + ``` + +=== "Mac" + + [git-scm.com/download/mac](https://git-scm.com/download/mac) + + or + + ```sh + brew install git + ``` + +=== "Windows" + + [git-scm.com/download/win](https://git-scm.com/download/win) + +!!! tip + + Also install [GitHub Desktop](https://desktop.github.com/) to make your life easier. + +### Download `tm2py` Repository + +Have a local clone of the entire `tm2py` repository on your machine: + +=== "Terminal" + + ```sh + git clone https://github.com/BayAreaMetro/tm2py.git + ``` + +=== "GitHub Desktop" + + [Open Clone in GitHub Desktop](x-github-client://openRepo/https://github.com/BayAreaMetro/tm2py) + +### Get a Text Editor + +Text editors with python support and/or integrated development environments (IDEs) make development +a lot easier than working in Notepad. 
+ +Some to try: + +- [VS Code](https://code.visualstudio.com/) + +## Development Pattern + +Generally speaking, development uses git branches to manage progress on features and bugs while +maintaining a stable and versioned `main` branch; most features are developed from the `develop` +branch as per the [git-flow model](https://nvie.com/posts/a-successful-git-branching-model/), with +product road-mapping as per [issues in milestones](https://github.com/BayAreaMetro/tm2py/milestones) +and managed in the [project board](https://github.com/BayAreaMetro/tm2py/projects). + +```mermaid + gitGraph + commit id: "a" + branch develop + checkout develop + commit id: "initial development setup" + branch featureA + checkout featureA + commit id: "initial try" + commit id: "more work" + commit id: "Passes Tests" + checkout develop + commit id: "small change" + checkout featureA + merge develop + commit id: "Passes Tests w/Develop Updates" + checkout develop + merge featureA + branch featureB + checkout featureB + commit id: "work on another feature" + commit id: "b" + checkout develop + merge featureB + checkout main + merge develop + branch release + checkout release + commit tag: "v0.9-prerelease" + commit tag: "v0.9" + checkout main + merge release + checkout develop + merge main + +``` + +## How to Contribute + +The following are the general steps taken to contribute to `tm2py`. + +### Issue Development + +Generally speaking, all contributions should support an issue which has a +clearly-defined user-story, a set of tests/conditions which need to be demonstrated in order to +close the issue, an agreed-upon approach, and is assigned to the person who should be working on it. + +### Branch + +Use [GitHub's branching](https://docs.github.com/en/get-started/quickstart/github-flow) capabilities +to create a feature branch from the main `develop` branch which is clearly named (e.g. features: `feat-add-transit-assignment`; bug fixes: `fix-crash-macosx`) and check it out. + +=== "Terminal" + + ```sh + git checkout develop + git checkout -b fix-crash-macosx + ``` + +=== "GitHub Desktop" + + [Managing branches documentation](https://docs.github.com/en/desktop/contributing-and-collaborating-using-github-desktop/making-changes-in-a-branch/managing-branches) + +### Develop tests + +As much as possible, we use +[test-driven development](https://en.wikipedia.org/wiki/Test-driven_development) in order to clearly +define when the work is done and working. This can be achieved by writing a new test or +extending another test. **When this is complete, the specified test should fail.** + +### Fix issue tests/Address user story + +Complete development using the approach agreed upon in the issue. **When this +is complete, the tests for the issue should pass and the user story in the issue should +be satisfied.** + +General notes about code style: + +- Use PEP8 general style and Google-style docstrings +- Add logging statements throughout using the [logging module](#Logging) +- Clarity over concision +- Explicit over implicit +- Add comments for non-obvious code where it would take a user a while to figure out + +Confirm tests run: + +=== "With Emme" + + If you have Emme installed, the tests will automatically run with the Emme environment. 
+ + ```sh + pytest -s + ``` + +=== "Using Mocked Emme Environment" + + If you have Emme installed but want to force running the tests with the Mock: + + ```sh + pytest --inro mock + ``` + +### Update/address other failing tests + +Update your branch with the most recent version of the develop +branch (which may have moved forward), resolving any merge conflicts and fixing any other tests that may now +be failing. **When this is complete, all tests should pass.** + +!!! tip + + You can (and should) push your changes throughout your work so that others can see what you + are working on, contribute advice, and sometimes work on the issue with you. + +### Update relevant documentation + +See the [Documentation on Documentation](./documentation.md). + +### Tidy your work + +In order to make sure all changes comply with our requirements and are consistent +with specifications (e.g. for markdown files, which aren't tested in `pytest`), we use +[`pre-commit`](https://pre-commit.com/): + +```sh + +pre-commit run --all-files +pre-commit run --hook-stage manual --all-files + +``` + +!!! tip + + pre-commit checks will often "fail" on the first run while they fix the issues they find. When + you run them again, they should pass. + +### Pull-Request + +Create the pull-request with a description that clearly defines what the pull request contains +and link it to the issues it addresses in the description via closing keywords (if applicable) or +references. Finally, please assign reviewers who should review the pull-request prior to it being +merged and address their requested changes. + +### Review and Agree on Pull Request with Reviewers + +The pull request author should be responsive to reviewer questions and comments, addressing them in-line and through updated code pushes. + +### Merge + +Merge the approved pull-request to `develop` using the `squash all changes` functionality +so that it appears as a single commit on the `develop` branch. Resolve any merge conflicts and +close any issues which were fully addressed. + +## Logging + +The Logging module has separate level filters for the following outputs: + +- *display* +- *file* +- *fallback* + +In addition, there are: + +- an *override* logging level filter by component name and iteration, and +- a notify-slack component (untested at this time) + +### Logging Levels + +Here are the log levels as defined in `TM2PY`: + +| **Level** | **Description** | +| --------- | --------------- | +|TRACE| Highly detailed information which would rarely be of interest except for detailed debugging by a developer.| +|DEBUG| Diagnostic information which would generally be useful to a developer debugging the model code; this may also be useful to a model operator in some cases.| +|DETAIL| More detail than would normally be of interest, but might be useful to a model operator debugging a model run / data or understanding model results.| +|INFO| Messages which would normally be worth recording about the model operation.| +|STATUS| Top-level, "model is running"-type messages. There should be relatively few of these, generally one per component, or one per time period if the procedure is long.| +|WARN| Warning messages where there is a possibility of a problem.| +|ERROR| A problem causing operation to halt which is normal (or not unexpected) in scope, e.g. a file does not exist. Includes general Python exceptions.| +|FATAL| A severe problem requiring operation to stop immediately.| + +!!! Note + + In practice there may not be a large distinction between ERROR and FATAL in the tm2py context. 
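For orientation, here is a hedged sketch of how a component might emit messages at several of these levels. It assumes the tm2py `Logger` exposes one helper method per level, mirroring the `logger.info()` pattern shown in the next section; the component and message texts are illustrative:

```python
# Illustrative only: "logger" is assumed to be the tm2py Logger available to a
# component, with a level-named helper for each level in the table above.
logger.status("Highway assignment: starting")               # top-level progress
logger.info("Assigning time period am")                     # normal operational record
logger.detail("Class 'drive alone': demand matrix loaded")  # operator-level detail
logger.debug("Relative gap after iteration 3: 0.0021")      # developer diagnostics
logger.warn("Zero-demand zones found; their skims may be empty")  # possible problem
```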
+ +### Adding log statements in code + +Messages can be recorded using: + +```python +logger.log("a message", level="INFO") + +# or equivalently + +logger.info("a message") +``` + +Additional arguments: +- Indent the message: `indent=True` + +Group log messages together: +- Using a context: `logger.log_start_end()` +- Using a decorator: + +```python +@LogStartEnd("Highway assignment and skims", level="STATUS") +def run(self): +    ... +``` + +### Viewing logging + +Log messages can be shown in the console / notebook, controlled by the `logging.display_level` configuration setting: + +```toml +[logging] +display_level = "INFO" # or "DEBUG", etc. +``` + +Written log messages are split across three log files: + +=== "**Run Log**" + For model overview. + + | **Settings** | | + | ----------- | ------------------------ | + | *Location:* | `logging.run_file_path` | + | *Level:* | `logging.run_file_level` | + +=== "**Debug Log**" + A more detailed log. + + | **Settings** | | + | ----------- | ------------------------ | + | *Location:* | `logging.log_file_path` | + | *Level:* | `logging.log_file_level` | + +=== "**Catch-all Log**" + Will output all log messages recorded. + + | **Settings** | | + | ----------- | ------------------------ | + | *Location:* | `logging.log_on_error_file_path` | + | *Level:* | All... | + +!!! Note + + Some logging can be made conditional so that it only runs if the log level is filtered in, + + e.g. if it takes a long time to generate a report. There is an example of this in the highway assignment, which generates a report of the matrix result statistics only if DEBUG is filtered in for at least one of the log levels. + +### Additional Settings + +#### Locally override logging level for debugging + +The `logging.iter_component_level` setting can be used to locally override the logging level filter for debug purposes. This is specified as one or more tuples of (iteration, component_name, log_level). + +!!! Example + + Record **all** messages during the highway component run at iteration 2: + + ``` + [logging] + iter_component_level = [[ 2, "highway", "TRACE" ]] + ``` diff --git a/docs/contributing/documentation.md b/docs/contributing/documentation.md index d8468ddc..5eef5b4c 100644 --- a/docs/contributing/documentation.md +++ b/docs/contributing/documentation.md @@ -4,18 +4,25 @@ Documentation is developed using the Python package [mkdocs](https://www.mkdocs. ## Installing -Using pip: -```sh -pip install -r docs/requirements.txt -``` +The requirements for building the documentation are the same as those for the `tm2py` package. + ## Building Locally -Mkdocs documentation webpages can be built using the following shell command from the `docs` folder: +Mkdocs documentation webpages can be built locally and viewed at the URL specified in the terminal: + ```sh -mkdocs build mkdocs serve ``` +## Linting + +Documentation should be linted before deployment: + +```sh +pre-commit run --all-files +pre-commit run --hook-stage manual --all-files +``` + ## Deploying documentation -Documentation is built and deployed to [http://bayareametro.github.io/tm2py] upon the `develop` branch successfully passing continuous integration tests (to be updated to `master` when released) as specified in `.github/workflows/docs.yml`. +Documentation is built and deployed to <http://bayareametro.github.io/tm2py> using the [`mike`](https://github.com/jimporter/mike) package and GitHub Actions configured in `.github/workflows/` for each "ref" (i.e. branch) in the tm2py repository. 
diff --git a/docs/examples/README.md b/docs/examples/README.md new file mode 100644 index 00000000..9fab750e --- /dev/null +++ b/docs/examples/README.md @@ -0,0 +1,7 @@ +# Example Workflows + +| Example | Description | +| ----------- | ------------------------------------ | +| [Run Model](run_model) | Simple model run | +| [Configuration](configuration.md) | To come. | +| [Restart Model Run](restart_model) | To come. | diff --git a/docs/examples/configuration.md b/docs/examples/configuration.md new file mode 100644 index 00000000..3e6c9dba --- /dev/null +++ b/docs/examples/configuration.md @@ -0,0 +1,3 @@ +# Configuration + +To come. diff --git a/docs/examples/restart_model.ipynb b/docs/examples/restart_model.ipynb new file mode 100644 index 00000000..7ab6e0e6 --- /dev/null +++ b/docs/examples/restart_model.ipynb @@ -0,0 +1,28 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Restart Model Run\n", + "\n", + "To Come." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/examples/run_model.ipynb b/docs/examples/run_model.ipynb new file mode 100644 index 00000000..d9ed4970 --- /dev/null +++ b/docs/examples/run_model.ipynb @@ -0,0 +1,166 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "019c3621", + "metadata": {}, + "source": [ + "### Essential run model Notebook example\n", + "\n", + "Import the RunController from tm2py root, and provide as inputs:\n", + "- a list of config files in .toml format, which describe the model settings; by convention this is split into a \"scenario_config.toml\" with a few run / operational parameters, and a \"model_config.toml\" with most of the model details\n", + "- the run directory (if not specified this is the directory of the first config file provided)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "fa3b6717", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from tm2py import RunController\n", + "\n", + "controller = RunController(\n", + " [\n", + " os.path.join(\"..\", \"examples\", \"scenario_config.toml\"),\n", + " os.path.join(\"..\", \"examples\", \"model_config.toml\"),\n", + " ],\n", + " run_dir=os.path.join(\"..\", \"examples\", \"UnionCity\"),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "01fa65c1", + "metadata": {}, + "source": [ + "The example scenario data can be downloaded:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "07cc2621", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'..\\\\examples\\\\UnionCity'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from tm2py import get_example\n", + "\n", + "get_example(root_dir=os.path.join(\"..\", \"\"))" + ] + }, + { + "cell_type": "markdown", + "id": "863bea38", + "metadata": {}, + "source": [ + "Run the model via controller.run():" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "24438d17", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "26-Apr-2022 (14:13:30): Start iteration 0\n", + "26-Apr-2022 (14:13:30): Start prepare network attributes and modes\n", + "26-Apr-2022 (14:13:37): End prepare network attributes and modes\n", + "26-Apr-2022 (14:13:37): Start Highway assignment and 
skims\n", + "26-Apr-2022 (14:13:37): Start Highway assignment for period ea\n", + "26-Apr-2022 (14:13:37): Set ul1 to 0 for background traffic\n", + "26-Apr-2022 (14:13:37): Start Creating skim matrices\n", + "26-Apr-2022 (14:13:37): End Creating skim matrices\n", + "26-Apr-2022 (14:13:37): Start Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:38): End Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:38): End Highway assignment for period ea\n", + "26-Apr-2022 (14:13:38): Start Highway assignment for period am\n", + "26-Apr-2022 (14:13:38): Set ul1 to 0 for background traffic\n", + "26-Apr-2022 (14:13:38): Start Creating skim matrices\n", + "26-Apr-2022 (14:13:38): End Creating skim matrices\n", + "26-Apr-2022 (14:13:38): Start Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:39): End Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:39): End Highway assignment for period am\n", + "26-Apr-2022 (14:13:39): Start Highway assignment for period md\n", + "26-Apr-2022 (14:13:39): Set ul1 to 0 for background traffic\n", + "26-Apr-2022 (14:13:39): Start Creating skim matrices\n", + "26-Apr-2022 (14:13:39): End Creating skim matrices\n", + "26-Apr-2022 (14:13:39): Start Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:40): End Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:40): End Highway assignment for period md\n", + "26-Apr-2022 (14:13:40): Start Highway assignment for period pm\n", + "26-Apr-2022 (14:13:40): Set ul1 to 0 for background traffic\n", + "26-Apr-2022 (14:13:40): Start Creating skim matrices\n", + "26-Apr-2022 (14:13:40): End Creating skim matrices\n", + "26-Apr-2022 (14:13:40): Start Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:40): End Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:41): End Highway assignment for period pm\n", + "26-Apr-2022 (14:13:41): Start Highway assignment for period ev\n", + "26-Apr-2022 (14:13:41): Set ul1 to 0 for background traffic\n", + "26-Apr-2022 (14:13:41): Start Creating skim matrices\n", + "26-Apr-2022 (14:13:41): End Creating skim matrices\n", + "26-Apr-2022 (14:13:41): Start Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:41): End Run SOLA assignment with path analyses\n", + "26-Apr-2022 (14:13:41): End Highway assignment for period ev\n", + "26-Apr-2022 (14:13:41): End Highway assignment and skims\n", + "26-Apr-2022 (14:13:41): Start SkimMAZCosts run\n", + "26-Apr-2022 (14:13:41): Start SkimMAZCosts _prepare_network\n", + "26-Apr-2022 (14:13:41): End SkimMAZCosts _prepare_network\n", + "26-Apr-2022 (14:13:42): End SkimMAZCosts run\n" + ] + } + ], + "source": [ + "controller.run()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7fe4d281", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/environment.yml b/environment.yml index f1ce810e..29b0c063 100644 --- a/environment.yml +++ b/environment.yml @@ -304,3 +304,4 @@ dependencies: - virtualenv==20.13.3 - watchdog==2.1.6 - wcmatch==8.3 +prefix: 
/Users/elizabeth/opt/miniconda3/envs/tm2py diff --git a/lib/Fiona-1.8.20-cp37-cp37m-win_amd64.whl b/lib/Fiona-1.8.20-cp37-cp37m-win_amd64.whl new file mode 100644 index 00000000..c53c639c Binary files /dev/null and b/lib/Fiona-1.8.20-cp37-cp37m-win_amd64.whl differ diff --git a/lib/GDAL-3.3.2-cp37-cp37m-win_amd64.whl b/lib/GDAL-3.3.2-cp37-cp37m-win_amd64.whl new file mode 100644 index 00000000..54552d51 Binary files /dev/null and b/lib/GDAL-3.3.2-cp37-cp37m-win_amd64.whl differ diff --git a/lib/Shapely-1.8.1.post1-cp37-cp37m-win_amd64.whl b/lib/Shapely-1.8.1.post1-cp37-cp37m-win_amd64.whl new file mode 100644 index 00000000..187645b8 Binary files /dev/null and b/lib/Shapely-1.8.1.post1-cp37-cp37m-win_amd64.whl differ diff --git a/lib/geopandas-0.10.2-py2.py3-none-any.whl b/lib/geopandas-0.10.2-py2.py3-none-any.whl new file mode 100644 index 00000000..fec2f77c Binary files /dev/null and b/lib/geopandas-0.10.2-py2.py3-none-any.whl differ diff --git a/lib/pyproj-3.2.1-cp37-cp37m-win_amd64.whl b/lib/pyproj-3.2.1-cp37-cp37m-win_amd64.whl new file mode 100644 index 00000000..0059ac51 Binary files /dev/null and b/lib/pyproj-3.2.1-cp37-cp37m-win_amd64.whl differ diff --git a/mkdocs.yml b/mkdocs.yml index 0e54f2ab..4e2821b6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -83,4 +83,4 @@ markdown_extensions: - toc: # insert a blank space before the character permalink: " ¶" - \ No newline at end of file + diff --git a/notebooks/README.md b/notebooks/README.md index 8c2f7d1e..bc3cda5b 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -1,4 +1,3 @@ # Notebooks Repository for storing useful [Jupyter Notebooks](https://jupyter-notebook.readthedocs.io/en/stable/) - diff --git a/pytest.ini b/pytest.ini index 2247cea0..916a2ff1 100644 --- a/pytest.ini +++ b/pytest.ini @@ -2,3 +2,4 @@ markers = skipci: Marker to skip if running continuous integration. Useful for lengthy tests. menow: Marker indicating a test you are currently working on addressing. +pythonpath = . 
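The `skipci` and `menow` markers registered in `pytest.ini` above are applied per-test. A hedged sketch of their use follows (the test names here are illustrative; `tests/test_air_access.py` below shows `menow` on a real test):

```python
import pytest


@pytest.mark.skipci  # lengthy test, excluded on CI via `pytest -v -m "not skipci"`
def test_full_model_run():
    ...


@pytest.mark.menow  # run only the tests currently being worked on via `pytest -m menow`
def test_component_in_progress():
    ...
```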
diff --git a/requirements.txt b/requirements.txt index 3d808565..a9f79008 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,31 +1,35 @@ -geojson -geographiclib -geopandas > 0.8.0 fiona folium fontawesome_markdown -geopandas -jinja2 < 3.1.0 +gdal +geographiclib +geojson +geopandas > 0.8.0 jsonschema jupyter lark-parser +markdown == 3.3.1 # needs to be compatible with mkdocs, which needs > markdown 3.2.1 +mike mkdocs mkdocs-autorefs mkdocs-awesome-pages-plugin +mkdocs-jupyter mkdocs-macros-plugin mkdocs-material mkdocstrings -openmatrix -osmnx >= 0.12 +mkdocstrings-python notebook numpy +openmatrix +osmnx >= 0.12 pandas > 1.0 -pyarrow pydantic -pyproj > 2.2.0 +pyproj > 2.2.0 pyyaml pywin32==224 ; sys_platform == 'win32' rtree +scipy shapely tables toml +typing_extensions diff --git a/setup.cfg b/setup.cfg index 99b0bfcd..a4bc1af4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,3 +12,5 @@ exclude = build, docs/**, __pycache__ +[pydocstyle] +convention = google diff --git a/setup.py b/setup.py index 61116b0c..ce23fb82 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,7 @@ +"""Installation script for tm2py package.""" + import os + from setuptools import setup version = "0.0.1" @@ -24,11 +27,17 @@ with open("dev-requirements.txt") as f: dev_requirements = f.readlines() install_requires_dev = [r.strip() for r in dev_requirements] +if os.path.exists(os.path.join("docs", "requirements.txt")): + with open(os.path.join("docs", "requirements.txt")) as f: + doc_requirements = f.readlines() + install_requires_doc = [r.strip() for r in doc_requirements] +else: + install_requires_doc = [] # While version is in active development, install both development and base requirements. major_version_number = int(version.split(".")[0]) if major_version_number < 1: - install_requires = install_requires + install_requires_dev + install_requires = install_requires + install_requires_dev + install_requires_doc setup( name="tm2py", diff --git a/tests/README.md b/tests/README.md index 7c341bdf..865b9657 100644 --- a/tests/README.md +++ b/tests/README.md @@ -11,41 +11,47 @@ Tests are run with the [pyTest](pytest.org) framework. Pytest can be installed using one of the following options. Install along with all development requirements (recommended): + +=== "pip" + ```sh pip install -r dev-requirements.txt ``` - -Install using PIP: + +Install using PIP: + ```sh pip install pytest ``` - -Install using Conda: + +Install using Conda: + ```sh conda install pytest ``` ## Running tests -1. Run all tests +=== "All tests" + ```sh -pytest +pytest ``` -2. Run tests in `test_basic.py` +=== "Tests in a specific file" + ```sh pytest tests/test_basic.py ``` -3. Run tests decorated with @pytest.mark.favorites decorator +=== "Tests with a specific decorator" + ```sh pytest -m favorites ``` -4. Run all tests and print out stdout -```sh -pytest -s -``` === "Continuous Integration Tests" -5. 
Run all tests which are run on the CI server ```sh pytest -v -m "not skipci" ``` diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..315df9f3 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,98 @@ +"""Shared fixtures for tests.""" +import os +import sys +from pathlib import Path + +# why import gdal first: https://github.com/BayAreaMetro/tm2py/blob/7a563f0c5cea2125f28bfaedc50205e70c532094/README.md?plain=1#L57 +import gdal +import pytest + +print("CONFTEST LOADED") + + +@pytest.fixture(scope="session") +def root_dir(): + """Root tm2py directory.""" + d = os.path.dirname(os.path.abspath(__file__)) + for i in range(3): + if "examples" in os.listdir(d): + return Path(d) + d = os.path.dirname(d) + + +@pytest.fixture(scope="session") +def examples_dir(root_dir): + """Directory for example files.""" + return root_dir / "examples" + + +@pytest.fixture(scope="session") +def bin_dir(root_dir): + """Directory for bin files.""" + return root_dir / "bin" + + +# todo: why not use the existing tmp_path fixture? + # https://docs.pytest.org/en/7.1.x/how-to/tmp_path.html +@pytest.fixture() +def temp_dir(): + """Create a temporary directory and clean it up upon test completion. + + Yields: + Path: Path object of temporary directory location + """ + import tempfile + + tf = tempfile.TemporaryDirectory() + yield tf.name + tf.cleanup() + + +def pytest_addoption(parser): + """Parse command line arguments.""" + parser.addoption("--inro", action="store", default="notmock") + print("pytest_addoption") + + +def mocked_inro_context(): + """Mocking of modules which need to be mocked for tests.""" + import unittest.mock + + sys.modules["inro.emme.database.emmebank"] = unittest.mock.MagicMock() + sys.modules["inro.emme.network"] = unittest.mock.MagicMock() + sys.modules["inro.emme.database.scenario"] = unittest.mock.MagicMock() + sys.modules["inro.emme.database.matrix"] = unittest.mock.MagicMock() + sys.modules["inro.emme.network.node"] = unittest.mock.MagicMock() + sys.modules["inro.emme.desktop.app"] = unittest.mock.MagicMock() + sys.modules["inro"] = unittest.mock.MagicMock() + sys.modules["inro.modeller"] = unittest.mock.MagicMock() + + +@pytest.fixture(scope="session") +def inro_context(pytestconfig): + """Mocks necessary inro modules if they aren't successfully imported.""" + + try: + # obey command line option + _inro = pytestconfig.getoption("inro") + print("_inro = [{}]".format(_inro)) + if _inro.lower() == "mock": + print("Mocking inro environment.") + mocked_inro_context() + else: + import inro.emme.database.emmebank + + print("Using inro environment.") + except Exception as inst: + print(type(inst)) # the exception instance + print(inst.args) # arguments stored in .args + print(inst) # __str__ allows args to be printed directly + + # if commandline option fails, try using Emme and then, failing that, using Mock + try: + import inro.emme.database.emmebank + + print("Using inro environment.") + except ModuleNotFoundError: + print("Mocking inro environment.") + mocked_inro_context() diff --git a/tests/test_air_access.py b/tests/test_air_access.py new file mode 100644 index 00000000..9ecceb1f --- /dev/null +++ b/tests/test_air_access.py @@ -0,0 +1,20 @@ +"Test airport passenger access model." + +import os + +import pytest + +from tm2py.examples import get_example + + +@pytest.mark.menow +def test_air_pax_model(inro_context, examples_dir, root_dir): + "Tests that the airport access model can be run." 
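+    # "tools" is the shared helper module used by these component tests;
+    # test_component is assumed to build a run limited to the named component.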
+ from tools import test_component + + get_example(example_name="UnionCity", root_dir=root_dir) + + my_run = test_component(examples_dir, "air_passenger") + my_run.run_next() + + # TODO write assert diff --git a/tests/test_config.py b/tests/test_config.py index 3be9d088..e517e91c 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,46 +1,53 @@ +"""Testing module for the run configuration data classes.""" + import os import sys from unittest.mock import MagicMock import pytest -EXAMPLE_DIR = os.path.join( - os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "examples" -) -TEST_CONFIG = os.path.join(EXAMPLE_DIR, "scenario_config.toml") -MODEL_CONFIG = os.path.join(EXAMPLE_DIR, "model_config.toml") - -def test_config_read(): +def test_config_read(examples_dir, inro_context): """Configuration should load parameters to the correct namespaces.""" - # If (and only if) Emme is not installed, replace inro libraries with MagicMock - try: - import inro.emme.database.emmebank - except ModuleNotFoundError: - sys.modules["inro.emme.database.emmebank"] = MagicMock() - sys.modules["inro.emme.network"] = MagicMock() - sys.modules["inro.emme.database.scenario"] = MagicMock() - sys.modules["inro.emme.database.matrix"] = MagicMock() - sys.modules["inro.emme.network.node"] = MagicMock() - sys.modules["inro.emme.desktop.app"] = MagicMock() - sys.modules["inro"] = MagicMock() - sys.modules["inro.modeller"] = MagicMock() + SCENARIO_CONFIG = "scenario_config.toml" + MODEL_CONFIG = "model_config.toml" + from tm2py.config import Configuration - my_config = Configuration.load_toml([TEST_CONFIG, MODEL_CONFIG]) + _scenario_config = os.path.join(examples_dir, SCENARIO_CONFIG) + _model_config = os.path.join(examples_dir, MODEL_CONFIG) + + my_config = Configuration.load_toml([_scenario_config, _model_config]) assert my_config.run.start_iteration == 0 - assert my_config.run.end_iteration == 1 + assert my_config.run.end_iteration == 2 assert my_config.scenario.year == 2015 + assert my_config.run.initial_components == ( + # "create_tod_scenarios", + # "active_modes", + "air_passenger", + "prepare_network_highway", + "highway", + "highway_maz_skim", + "prepare_network_transit", + "transit_assign", + "transit_skim", + ) assert my_config.time_periods[1].name == "am" assert my_config.highway.maz_to_maz.operating_cost_per_mile == 18.93 assert len(my_config.time_periods) == 5 assert my_config.highway.classes[0].description == "drive alone" + assert my_config.logging.log_file_path.startswith("tm2py_debug_") + assert my_config.logging.log_file_path.endswith(".log") + assert my_config.logging.display_level == "STATUS" -@pytest.mark.xfail def test_config_read_badfile(): """Should have good behavior when file isn't there.""" from tm2py.config import Configuration - Configuration.load_toml("this_is_not_a_valid_file.toml") + try: + Configuration.load_toml("this_is_not_a_valid_file.toml") + raise AssertionError("Should have thrown an exception.") + except FileNotFoundError: + pass diff --git a/tests/test_docs.py b/tests/test_docs.py index e36eb0ad..c66ee509 100644 --- a/tests/test_docs.py +++ b/tests/test_docs.py @@ -1,12 +1,12 @@ +"""Testing module for documentation.""" + import pytest def test_docs_build(): - """ - Test that the documentation build is successful. 
- """ - import subprocess + """Test that the documentation build is successful.""" import os + import subprocess # Get the path to the base directory base_dir = os.path.join(os.path.dirname(__file__), "..") @@ -14,10 +14,12 @@ def test_docs_build(): # Build the docs try: - subprocess.run(["mkdocs", "build"], check=True, cwd=base_dir,capture_output=True) + subprocess.run( + ["mkdocs", "build"], check=True, cwd=base_dir, capture_output=True + ) except subprocess.CalledProcessError as e: - msg = e.stderr.decode('utf-8') + msg = e.stderr.decode("utf-8") pytest.fail(f"Documentation Failed to Build.\n {msg}") # Check that the docs were built successfully - assert os.path.exists(os.path.join(base_dir, "site", "index.html")) \ No newline at end of file + assert os.path.exists(os.path.join(base_dir, "site", "index.html")) diff --git a/tests/test_externals.py b/tests/test_externals.py new file mode 100644 index 00000000..e3d5fb1f --- /dev/null +++ b/tests/test_externals.py @@ -0,0 +1,20 @@ +"Test external travel model." + +import os + +import pytest + +from tm2py.examples import get_example + + +# @pytest.mark.menow +def test_external_travel(examples_dir, root_dir): + "Tests that internal/external travel component can be run." + from tools import test_component + + get_example(example_name="UnionCity", root_dir=root_dir) + + my_run = test_component(examples_dir, "internal_external") + my_run.run_next() + + # TODO write assert diff --git a/tests/test_highway.py b/tests/test_highway.py new file mode 100644 index 00000000..38ac5e94 --- /dev/null +++ b/tests/test_highway.py @@ -0,0 +1,51 @@ +"""Testing of highway network components""" +import glob +import os + +import pytest +from tools import assert_csv_equal, diff_omx + +from tm2py.examples import get_example + + +def test_highway(examples_dir, root_dir): + "Tests that prepare highway network component can be run." + from tools import test_component + + get_example(example_name="UnionCity", root_dir=root_dir) + + my_run = test_component(examples_dir, ["prepare_network_highway", "highway"]) + my_run.run() + + # TODO write assert + + +def test_highway_skims(examples_dir): + """Test that the OMX highway skims match the reference.""" + run_dir = os.path.join(examples_dir, "UnionCity") + + ref_dir_hwy_skims = os.path.join(run_dir, "ref_skim_matrices", "highway") + ref_skim_files = glob.glob(os.path.join(ref_dir_hwy_skims, "*.omx")) + + run_dir_hwy_skims = os.path.join(run_dir, "skim_matrices", "highway") + run_skim_files = glob.glob(os.path.join(run_dir_hwy_skims, "*.omx")) + + # check that the expected files are all there + ref_skim_names = [os.path.basename(f) for f in ref_skim_files] + run_skim_names = [os.path.basename(f) for f in run_skim_files] + + assert set(ref_skim_names) == set( + run_skim_names + ), f"Skim matrix names do not match expected\ + reference. 
\n Expected: {ref_skim_names}\n Actual: {run_skim_names}"
+
+    missing_skims = []
+    different_skims = []
+
+    for ref_skim_f, run_skim_f in zip(ref_skim_files, run_skim_files):
+        _missing_ms, _diff_ms = diff_omx(ref_skim_f, run_skim_f)
+        missing_skims.extend([ref_skim_f + _m for _m in _missing_ms])
+        different_skims.extend([ref_skim_f + _m for _m in _diff_ms])
+
+    assert len(missing_skims) == 0, f"Missing skims: {missing_skims}"
+    assert len(different_skims) == 0, f"Different skims: {different_skims}"
diff --git a/tests/test_highway_maz.py b/tests/test_highway_maz.py
new file mode 100644
index 00000000..ec8877e1
--- /dev/null
+++ b/tests/test_highway_maz.py
@@ -0,0 +1,37 @@
+"""Testing of highway network components."""
+import glob
+import os
+
+import pytest
+from conftest import inro_context
+from tools import assert_csv_equal, diff_omx
+
+from tm2py.examples import get_example
+
+
+def test_highway_maz(inro_context, examples_dir, root_dir):
+    "Tests that highway MAZ network assignment component can be run."
+    from tools import test_component
+
+    get_example(example_name="UnionCity", root_dir=root_dir)
+
+    my_run = test_component(examples_dir, ["highway_maz_assign", "highway_maz_skim"])
+
+    if inro_context != "inro":
+        return
+    my_run.run()
+
+    # TODO write assert
+
+
+def test_maz_da_skims(examples_dir):
+    """Test that the DA MAZ skims match the reference."""
+    run_dir = os.path.join(examples_dir, "UnionCity")
+
+    ref_dir_hwy_skims = os.path.join(run_dir, "ref_skim_matrices", "highway")
+    run_dir_hwy_skims = os.path.join(run_dir, "skim_matrices", "highway")
+
+    ref_csv = os.path.join(ref_dir_hwy_skims, "HWYSKIM_MAZMAZ_DA.csv")
+    run_csv = os.path.join(run_dir_hwy_skims, "HWYSKIM_MAZMAZ_DA.csv")
+
+    return assert_csv_equal(ref_csv, run_csv)
diff --git a/tests/test_household.py b/tests/test_household.py
new file mode 100644
index 00000000..74b9b1e4
--- /dev/null
+++ b/tests/test_household.py
@@ -0,0 +1,16 @@
+"Test household travel model."
+
+import os
+
+import pytest
+
+
+@pytest.mark.skipci
+def test_household_travel(examples_dir):
+    "Tests that household travel component can be run."
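The MAZ skim check above delegates to `assert_csv_equal` (defined in `tests/tools.py` later in this diff), a thin wrapper over the pandas testing utilities. A minimal sketch of the comparison pattern, with illustrative in-memory frames standing in for the reference and run CSVs:

```python
# assert_frame_equal raises AssertionError with a detailed diff on any
# mismatch in shape, dtype, or values, and returns None when equal --
# which is why assert_csv_equal can simply return its result.
import pandas as pd

ref_df = pd.DataFrame({"FROM_ZONE": [1, 2], "COST_DA": [1.25, 3.50]})
run_df = pd.DataFrame({"FROM_ZONE": [1, 2], "COST_DA": [1.25, 3.50]})

pd.testing.assert_frame_equal(ref_df, run_df)  # silent when equal
```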
+ from tools import test_component + + my_run = test_component(examples_dir, "household") + my_run.run_next() + + # TODO write assert diff --git a/tests/test_logging.py b/tests/test_logging.py new file mode 100644 index 00000000..3e995243 --- /dev/null +++ b/tests/test_logging.py @@ -0,0 +1,137 @@ +"""Test module for Logging.""" +import os +import pathlib +from datetime import datetime + + +def test_log(tmp_path: pathlib.Path): + """Test basic log operation outside model operation.""" + import tempfile + + from tm2py.config import LoggingConfig + from tm2py.logger import Logger + + # use a stand-in minimal Controller and Config class to operate Logger + log_config = { + "display_level": "STATUS", + "run_file_path": "tm2py_run.log", + "run_file_level": "STATUS", + "log_file_path": "tm2py_debug.log", + "log_file_level": "DEBUG", + "log_on_error_file_path": "tm2py_error.log", + "notify_slack": False, + "use_emme_logbook": False, + "iter_component_level": None, + } + + class Config: + logging = LoggingConfig(**log_config) + + class Controller: + def __init__(self, run_dir): + self.config = Config() + self.run_dir = run_dir + self.iter_component = None + self.logger = Logger(self) + + class TestException(Exception): + pass + + # we'll use the tmp_path for our logs + print("tmp_path: {}".format(tmp_path)) + assert tmp_path.is_dir() + + controller = Controller(tmp_path) + logger = controller.logger + # Use an error to test the recording of error messages + # as well as the generation of the "log_on_error" file + try: + logger.log("a message") # default log level is INFO + logger.log("A status", level="STATUS") + logger.log("detailed message", level="DETAIL") + logger.clear_msg_cache() # what is this? Why is it called here? + with logger.log_start_end("Running a set of steps"): + logger.log("Indented message with timestamp") + logger.log("Indented displayed message with timestamp", level="STATUS") + logger.log( + "A debug message not indented", + level="DEBUG", + indent=False, + ) + logger.log("A debug message", level="DEBUG") + logger.log("A trace message", level="TRACE") + if logger.debug_enabled: + # only generate this report if logging DEBUG + logger.log("A debug report that takes time to produce", level="DEBUG") + logger.warn("Warning") + + # raising error to test recording of error message in log + raise TestException("an error") + except TestException: + # catching the error to continue testing the content of the logs + + # I think the context / logcache was meant to make the following line unnecessary and + # automate error logging when an exception is thrown during a logging context + # But given that I don't like logging contexts, I think it's fine to explicitly log + # errors when they're caught + logger.error("TestException caught") + pass + + # Check the run_file recorded the high-level "STATUS" messages and above + print( + "Checking log messages in {}".format( + os.path.join(controller.run_dir, log_config["run_file_path"]) + ) + ) + with open(os.path.join(controller.run_dir, log_config["run_file_path"]), "r") as f: + text = [] + for line in f: + text.append(line) + print("Log run file: {}".format(text)) + assert len(text) == 6 # 4 status, 1 warning, 1 error + assert text[0].endswith("STATUS: A status\n") + assert text[1].endswith("STATUS: Start Running a set of steps\n") + assert text[4].endswith("Warning\n") + assert text[5].endswith("TestException caught\n") + + # Check the main log file containing all messages at DEBUG and above + with open(os.path.join(controller.run_dir, 
log_config["log_file_path"]), "r") as f: + text = [] + for line in f: + text.append(line) + print("Log file: {}".format(text)) + assert ( + len(text) == 12 + ) # INFO, STATUS, DETAIL, STATUS, INFO, STATUS, DEBUG x 3, STATUS, WARN, ERRORR + assert text[0].endswith("INFO: a message\n") + assert text[1].endswith("STATUS: A status\n") + assert text[2].endswith("DETAIL: detailed message\n") + assert text[3].endswith("STATUS: Start Running a set of steps\n") + # debug messages should appear + assert text[7].endswith("A debug message\n") + assert text[8].endswith("A debug report that takes time to produce\n") + # but not trace message + for logline in text: + assert "A trace message" not in logline + # error message recorded + + # todo: resolve + # assert "Error during model run" in text[9] + # assert text[10].startswith("Traceback") + + # Commenting out the following pending resolution of issue#87 + # (Feature: Explain/justify or remove LogCache, special error file) + # Check that the log_on_error is generated and has all messages + # with open( + # os.path.join(controller.run_dir, log_config["log_on_error_file_path"]), "r" + # ) as f: + # text = [] + # for line in f: + # text.append(line) + # assert len(text) == 14 + # assert text[0].startswith("STATUS") + # assert text[0].endswith("Running a set of steps\n") + # # debug and trace messages appear in post error log + # assert "DEBUG A debug message\n" in text + # assert "DEBUG A debug report that takes time to produce\n" in text + # assert "TRACE A trace message\n" in text diff --git a/tests/test_network.py b/tests/test_network.py new file mode 100644 index 00000000..5136664b --- /dev/null +++ b/tests/test_network.py @@ -0,0 +1,14 @@ +"""Testing Network module.""" + +import os +import sys + +import pytest + + +def test_get_blended_skim(inro_context): + """Test get_blended_skim.""" + from tm2py.components.network.skims import get_blended_skim + + # TODO + pass diff --git a/tests/test_run_flow.py b/tests/test_run_flow.py new file mode 100644 index 00000000..75ead6a1 --- /dev/null +++ b/tests/test_run_flow.py @@ -0,0 +1,92 @@ +"""Testing module for UnionCity subarea 'real' model runs.""" + +import os + +import pytest + + +@pytest.mark.skip("Takes a while") +def test_example_download(examples_dir, root_dir, inro_context): + """Tests that example data can be downloaded.""" + EXAMPLE = "UnionCity" + + import shutil + + from tm2py.examples import get_example + + example_root = os.path.join(examples_dir, EXAMPLE) + if os.path.exists(example_root): + shutil.rmtree(example_root) + + # default retrieval_url points to Union City example on box + _ex_dir = get_example(example_name="UnionCity", root_dir=root_dir) + + # check that the root union city folder exists + assert _ex_dir == example_root + assert os.path.isdir(example_root) + + # check some expected files exists + files_to_check = [ + os.path.join("inputs", "hwy", "tolls.csv"), + os.path.join("inputs", "nonres", "2035_fromOAK.csv"), + os.path.join("inputs", "landuse", "maz_data.csv"), + os.path.join("emme_project", "mtc_emme.emp"), + os.path.join("emme_project", "Database_highway", "emmebank"), + ] + for file_name in files_to_check: + assert os.path.exists( + os.path.join(example_root, file_name) + ), f"get_example failed, missing {file_name}" + + # check zip file was removed + assert not (os.path.exists(os.path.join(example_root, "test_data.zip"))) + + +@pytest.fixture(scope="session") +def union_city(examples_dir, root_dir, inro_context): + """Union City model run testing fixture.""" + from 
tm2py.controller import RunController + from tm2py.examples import get_example + + EXAMPLE = "UnionCity" + _example_root = os.path.join(examples_dir, EXAMPLE) + + get_example(example_name="UnionCity", root_dir=root_dir) + controller = RunController( + [ + os.path.join(examples_dir, "scenario_config.toml"), + os.path.join(examples_dir, "model_config.toml"), + ], + run_dir=_example_root, + ) + controller.run() + return controller + + +def test_validate_input_fail(examples_dir, inro_context, temp_dir): + """Test that validate_input fails when required inputs are missing.""" + import toml + + from tm2py.controller import RunController + from tm2py.examples import get_example + + model_config_path = os.path.join(examples_dir, r"model_config.toml") + with open(model_config_path, "r") as fin: + bad_model_config = toml.load(fin) + bad_model_config["highway"]["tolls"]["file_path"] = "foo.csv" + + bad_model_config_path = os.path.join(temp_dir, r"bad_model_config.toml") + with open(bad_model_config_path, "w") as fout: + toml.dump(bad_model_config, fout) + + union_city_root = os.path.join(examples_dir, "UnionCity") + + with pytest.raises(Exception) as e_info: + RunController( + [ + os.path.join(examples_dir, r"scenario_config.toml"), + bad_model_config_path, + ], + run_dir=union_city_root, + ) + assert e_info.type is FileNotFoundError diff --git a/tests/test_skims.py b/tests/test_skims.py new file mode 100644 index 00000000..70b7d688 --- /dev/null +++ b/tests/test_skims.py @@ -0,0 +1,15 @@ +"""Testing for the tools module.""" + + +def test_get_omx_skim_as_numpy(inro_context): + "Test get_omx_skim_as_numpy." + from tm2py.components.network.skims import get_omx_skim_as_numpy + + # TODO + + +def test_get_blended_skim(inro_context): + "Test get_blended_skim." + from tm2py.components.network.skims import get_blended_skim + + # TODO diff --git a/tests/test_tests.py b/tests/test_tests.py index 2018140c..6bd0294c 100644 --- a/tests/test_tests.py +++ b/tests/test_tests.py @@ -1,3 +1,5 @@ +"""Testing module for making sure tests are run correctly.""" + import pytest diff --git a/tests/test_tools.py b/tests/test_tools.py index f4c8ec8d..9cc75487 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -1,6 +1,10 @@ -from unittest.mock import MagicMock -import sys +"""Testing for the tools module.""" + import os +import sys +import tempfile +from unittest.mock import MagicMock + import pytest _EXAMPLE_URL = ( @@ -8,23 +12,101 @@ ) -def test_download_unzip(): - # If (and only if) Emme is not installed, replace INRO libraries with MagicMock - try: - import inro.emme.database.emmebank - except ModuleNotFoundError: - sys.modules["inro.emme.database.emmebank"] = MagicMock() - sys.modules["inro.emme.network"] = MagicMock() - sys.modules["inro.emme.database.scenario"] = MagicMock() - sys.modules["inro.emme.database.matrix"] = MagicMock() - sys.modules["inro.emme.network.node"] = MagicMock() - sys.modules["inro.emme.desktop.app"] = MagicMock() - sys.modules["inro"] = MagicMock() - sys.modules["inro.modeller"] = MagicMock() - +@pytest.mark.skip("Takes a while") +def test_download_unzip(temp_dir, inro_context): + """If (and only if) Emme is not installed, replace INRO libraries with MagicMock.""" from tm2py.tools import _download, _unzip - import tempfile + temp_file = os.path.join(temp_dir, "test_download.zip") + unzip_directory = os.path.join(temp_dir, "test_download") + + print("Downloading test_download.zip") + _download(_EXAMPLE_URL, temp_file) + assert os.path.getsize(temp_file) > 0, "download failed" + + 
print("Unzipping test_download.zip")
+    _unzip(temp_file, unzip_directory)
+    assert os.path.exists(unzip_directory), "unzip failed, no directory"
+    assert os.path.getsize(unzip_directory) > 0, "unzip failed, empty directory"
+
+    print("Checking for expected files.")
+    files_to_check = [
+        os.path.join("inputs", "hwy", "tolls.csv"),
+        os.path.join("inputs", "nonres", "2035_fromOAK.csv"),
+    ]
+    for file_name in files_to_check:
+        assert os.path.exists(
+            os.path.join(unzip_directory, file_name)
+        ), f"unzip failed, missing {file_name}"
+
+
+def test_interpolate(inro_context):
+    """Test interpolation."""
+    import pandas as pd
+    from pandas.testing import assert_frame_equal
+
+    from tm2py.tools import interpolate_dfs
+
+    _input_df = pd.DataFrame(
+        {
+            "prop1_2020": [20, 200, 2000],
+            "prop2_2020": [40, 55, 60],
+            "prop1_2030": [30, 300, 3000],
+            "prop2_2030": [40, 55, 70],
+        }
+    )
+
+    _2025_output_df = interpolate_dfs(_input_df, [2020, 2030], 2025)
+
+    _2025_expected_output_df = pd.DataFrame(
+        {
+            "prop1": [25.0, 250.0, 2500.0],
+            "prop2": [40.0, 55.0, 65.0],
+        }
+    )
+
+    _2020_output_df = interpolate_dfs(_input_df, [2020, 2030], 2020)
+
+    _2020_expected_output_df = pd.DataFrame(
+        {
+            "prop1": [20.0, 200.0, 2000.0],
+            "prop2": [40.0, 55.0, 60.0],
+        }
+    )
+
+    _2030_output_df = interpolate_dfs(_input_df, [2020, 2030], 2030)
+
+    _2030_expected_output_df = pd.DataFrame(
+        {
+            "prop1": [30.0, 300.0, 3000.0],
+            "prop2": [40.0, 55.0, 70.0],
+        }
+    )
+
+    assert_frame_equal(_2025_output_df, _2025_expected_output_df)
+
+    assert_frame_equal(_2020_output_df, _2020_expected_output_df)
+
+    assert_frame_equal(_2030_output_df, _2030_expected_output_df)
+
+
+def test_df_to_omx(inro_context):
+    """Test df_to_omx."""
+    from tm2py.omx import df_to_omx
+
+    # TODO
+
+
+def test_omx_to_dict(inro_context):
+    """Test omx_to_dict."""
+    from tm2py.omx import omx_to_dict
+
+    # TODO
+
+
+def test_csv_to_dfs(inro_context):
+    """Test zonal_csv_to_matrices."""
+    from tm2py.tools import _download, _unzip, zonal_csv_to_matrices
 
     with tempfile.TemporaryDirectory() as temp_dir:
         temp_file = os.path.join(temp_dir, "test_download.zip")
diff --git a/tests/test_transit.py b/tests/test_transit.py
new file mode 100644
index 00000000..f5c6295a
--- /dev/null
+++ b/tests/test_transit.py
@@ -0,0 +1,24 @@
+"Test transit assignment and skimming."
+
+import os
+
+import pytest
+from conftest import inro_context
+
+from tm2py.examples import get_example
+
+
+@pytest.mark.menow
+def test_transit(examples_dir, root_dir):
+    "Tests that transit network preparation, assignment, and skim components can be run."
+    from tools import test_component
+
+    get_example(example_name="UnionCity", root_dir=root_dir)
+
+    my_run = test_component(
+        examples_dir, ["prepare_network_transit", "transit_assign", "transit_skim"]
+    )
+
+    my_run.run()
+
+    # TODO write assert
diff --git a/tests/test_trucks.py b/tests/test_trucks.py
new file mode 100644
index 00000000..009c9f1a
--- /dev/null
+++ b/tests/test_trucks.py
@@ -0,0 +1,19 @@
+"Test commercial vehicle model."
+
+import os
+
+import pytest
+
+from tm2py.examples import get_example
+
+
+def test_commercial_vehicle(examples_dir, root_dir):
+    "Tests that commercial vehicle component can be run."
+    from tools import test_component
+
+    get_example(example_name="UnionCity", root_dir=root_dir)
+
+    my_run = test_component(examples_dir, "truck")
+    my_run.run_next()
+
+    # TODO write assert
diff --git a/tests/tools.py b/tests/tools.py
new file mode 100644
index 00000000..77334e06
--- /dev/null
+++ b/tests/tools.py
@@ -0,0 +1,69 @@
+"Utilities for testing."
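The expected frames in `test_interpolate` above follow directly from linear interpolation between the paired `*_2020` and `*_2030` columns. A pure-pandas check of that arithmetic, independent of `interpolate_dfs` itself:

```python
# For target year t between years a and b, the interpolated value is
# v_a + (v_b - v_a) * (t - a) / (b - a); at t=2025 with a=2020, b=2030
# the weight is 0.5, giving the midpoints asserted in the test.
import pandas as pd

df = pd.DataFrame({"prop1_2020": [20, 200, 2000], "prop1_2030": [30, 300, 3000]})
w = (2025 - 2020) / (2030 - 2020)  # 0.5
prop1_2025 = df["prop1_2020"] * (1 - w) + df["prop1_2030"] * w
assert prop1_2025.tolist() == [25.0, 250.0, 2500.0]
```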
+ +import os +from typing import Collection + +import openmatrix as omx +import pandas as pd + + +def assert_csv_equal(ref_csv: str, run_csv: str): + """Compare two csv files, return results of pd.testing.assert_frame_equal(). + + Args: + ref_csv (str): Reference CSV location + run_csv (str): Model run CSV location + + Returns: + Results of pd.testing.assert_frame_equal() + """ + ref_df = pd.read_csv(ref_csv) + run_df = pd.read_csv(run_csv) + return pd.testing.assert_frame_equal(ref_df, run_df) + + +def diff_omx(ref_omx: str, run_omx: str) -> Collection[Collection[str]]: + """Compare two OMX files, return missing and different matrices from reference. + + Args: + ref_omx: reference OMX file + run_omx: run OMX file + """ + _ref_f = omx.open_file(ref_omx, "r") + _run_f = omx.open_file(run_omx, "r") + _ref_matrix_names = _ref_f.list_matrices() + _run_matrix_names = _run_f.list_matrices() + + missing_matrices = [f for f in _ref_matrix_names if f not in _run_matrix_names] + different_matrices = [] + for m_key in _ref_matrix_names: + _ref_matrix = _ref_f[m_key].read() + _run_matrix = _run_f[m_key].read() + if not (_ref_matrix == _run_matrix).all(): + different_matrices.append(m_key) + + _ref_f.close() + _run_f.close() + return missing_matrices, different_matrices + + +def test_component(examples_dir, component, example_name="UnionCity"): + from tm2py.controller import RunController + + base_configs = [ + examples_dir / "model_config.toml", + examples_dir / "scenario_config.toml", + ] + + if isinstance(component, list): + my_components = component + else: + my_components = [component] + print(f"TESTING COMPONENTS: {my_components}") + my_run = RunController( + base_configs, run_dir=examples_dir / example_name, run_components=my_components + ) + # TODO RUN COMPONENT + print(f"RIGHT NOW JUST INITIATING - NOT RUNNING") + print(my_run) + return my_run diff --git a/tm2py/__init__.py b/tm2py/__init__.py index e69de29b..e2c0d299 100644 --- a/tm2py/__init__.py +++ b/tm2py/__init__.py @@ -0,0 +1,30 @@ +"""Base of tm2py module.""" +from ._version import __version__ +from .components.component import Component +from .config import ( + Configuration, + HouseholdConfig, + RunConfig, + ScenarioConfig, + TimePeriodConfig, +) +from .controller import RunController +from .examples import get_example +from .logger import Logger, LogStartEnd + +__all__ = [ + # component + "Component", + # config + "Configuration", + "get_example", + "HouseholdConfig", + "RunConfig", + "ScenarioConfig", + "TimePeriodConfig", + # controller + "RunController", + # logger + "Logger", + "LogStartEnd", +] diff --git a/tm2py/_version.py b/tm2py/_version.py index b408052a..07864825 100644 --- a/tm2py/_version.py +++ b/tm2py/_version.py @@ -1,3 +1,3 @@ -"""Version info""" +"""Version info.""" __version__ = "0.0.1" diff --git a/tm2py/components/__init__.py b/tm2py/components/__init__.py index e69de29b..9310dab1 100644 --- a/tm2py/components/__init__.py +++ b/tm2py/components/__init__.py @@ -0,0 +1 @@ +"""Module for all components.""" diff --git a/tm2py/components/component.py b/tm2py/components/component.py index d4543e87..bdeab7c4 100644 --- a/tm2py/components/component.py +++ b/tm2py/components/component.py @@ -1,19 +1,61 @@ -"""Root component ABC -""" +"""Root component ABC.""" + from __future__ import annotations + import os from abc import ABC, abstractmethod +from pathlib import Path +from typing import TYPE_CHECKING, List, Union -from typing import TYPE_CHECKING, List - -from tm2py.emme.manager import EmmeScenario +from 
tm2py.emme.manager import Emmebank, EmmeScenario if TYPE_CHECKING: from tm2py.controller import RunController +class FileFormatError(Exception): + """Exception raised when a file is not in the expected format.""" + + def __init__(self, f, *args): + """Exception for invalid file formats.""" + super().__init__(args) + self.f = f + + def __str__(self): + """String representation for FileFormatError.""" + return f"The {self.f} is not a valid format." + + class Component(ABC): - """Base component class for tm2py top-level inheritance. + """Template for Component class with several built-in methods. + + A component is a piece of the model that can be run independently (of other components) given + the required input data and configuration. It communicates information to other components via + disk I/O (including the emmebank). + + Note: if the component needs data that is not written to disk, it would be considered a + subcomponent. + + Abstract Methods – Each component class must have the following methods: + __init___: constructor, which associates the RunController with the instantiated object + run: run the component without any arguments + validate_inputs: validate the inputs to the component + report_progress: report progress to the user + verify: verify the component's output + write_top_sheet: write outputs to topsheet + test_component: test the component + + Template Class methods - component classes inherit: + get_abs_path: convenience method to get absolute path of the run directory + + + Template Class Properties - component classes inherit: + controller: RunController object + config: Config object + time_period_names: convenience property + top_sheet: topsheet object + logger: logger object + trace: trace object Example: :: @@ -35,78 +77,113 @@ def _step2(self): """ def __init__(self, controller: RunController): + """Model component template/abstract base class. + + Args: + controller (RunController): Reference to the run controller object. + """ self._controller = controller self._trace = None @property def controller(self): - """Parent controller""" + """Parent controller.""" return self._controller - def get_abs_path(self, rel_path: str): - """Get the absolute path from the root run directory given a relative path.""" - return os.path.join(self.controller.run_dir, rel_path) + @property + def emme_manager(self): + return self.controller.emme_manager - def get_emme_scenario(self, emmebank_path: str, time_period: str) -> EmmeScenario: - """Get the Emme scenario object from the Emmebank at emmebank_path for the time_period ID. + def get_abs_path(self, path: Union[Path, str]) -> str: + """Convenince method to get absolute path from run directory.""" + if not os.path.isabs(path): + return self.controller.get_abs_path(path).__str__() + else: + return path - Args: - emmebank_path: valid Emmebank path, absolute or relative to root run directory - time_period: valid time_period ID + @property + def time_period_names(self) -> List[str]: + """Return input time_period name or names and return list of time_period names. + + Implemented here for easy access for all components. 
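`get_abs_path` above is a guard-plus-join: absolute paths pass through untouched, relative ones are resolved against the run directory. A standalone sketch of that behavior (the run directory and file names are illustrative):

```python
# Resolve a path against a run directory unless it is already
# absolute, mirroring the semantics of Component.get_abs_path above.
import os


def get_abs_path(run_dir: str, path: str) -> str:
    if not os.path.isabs(path):
        return os.path.join(run_dir, path)
    return path


print(get_abs_path("/model/run", "inputs/hwy/tolls.csv"))  # /model/run/inputs/hwy/tolls.csv
print(get_abs_path("/model/run", "/abs/tolls.csv"))        # /abs/tolls.csv
```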
- Returns - Emme Scenario object (see Emme API Reference) + Returns: list of uppercased string names of time periods """ - if not os.path.isabs(emmebank_path): - emmebank_path = self.get_abs_path(emmebank_path) - emmebank = self.controller.emme_manager.emmebank(emmebank_path) - scenario_id = {tp.name: tp.emme_scenario_id for tp in self.config.time_periods}[ - time_period - ] - return emmebank.scenario(scenario_id) + return self.controller.time_period_names + + @property + def time_period_durations(self) -> dict: + """Return mapping of time periods to durations in hours.""" + return self.controller.time_period_durations @property - def config(self): - """Configuration settings loaded from config files""" - return self.controller.config + def congested_transit_assn_max_iteration(self) -> dict: + """Return mapping of time periods to max iteration in congested transit assignment.""" + return self.controller.congested_transit_assn_max_iteration @property def top_sheet(self): - """docstring placeholder for top sheet""" + """Reference to top sheet.""" return self.controller.top_sheet @property def logger(self): - """docstring placeholder for logger""" + """Reference to logger.""" return self.controller.logger @property def trace(self): - """docstring placeholder for trace""" + """Reference to trace.""" return self._trace + @abstractmethod def validate_inputs(self): - """Validate inputs are correct at model initiation, fail fast if not""" + """Validate inputs are correct at model initiation, raise on error.""" @abstractmethod def run(self): - """Run model component""" + """Run model component.""" + # @abstractmethod def report_progress(self): - """Write progress to log file""" + """Write progress to log file.""" - def test_component(self): - """Run stand-alone component test""" + # @abstractmethod + def verify(self): + """Verify component outputs / results.""" + # @abstractmethod def write_top_sheet(self): - """Write key outputs to the model top sheet""" + """Write key outputs to the model top sheet.""" - def verify(self): - """Verify component outputs / results""" - def time_period_names(self) -> List[str]: - """Return input time_period name or names and return list of time_period names. +class Subcomponent(Component): + """Template for sub-component class. + + A sub-component is a more loosly defined component that allows for input into the run() + method. It is used to break-up larger processes into smaller chunks which can be: + (1) re-used across components (i.e toll choice) + (2) updated/subbed in to a parent component(s) run method based on the expected API + (3) easier to test, understand and debug. + (4) more consistent with the algorithms we understand from transportation planning 101 + """ + + def __init__(self, controller: RunController, component: Component): + """Constructor for model sub-component abstract base class. + + Only calls the super class constructor. + + Args: + controller (RunController): Reference to the run controller object. + component (Component): Reference to the parent component object. + """ + super().__init__(controller) + self.component = component + + @abstractmethod + def run(self, *args, **kwargs): + """Run sub-component, allowing for multiple inputs. - Returns: list of string names of time periods + Allowing for inputs to the run() method is what differentiates a sub-component from + a component. 
""" - return [time.name for time in self.config.time_periods] diff --git a/tm2py/components/demand/__init__.py b/tm2py/components/demand/__init__.py index 13370d5b..36a57e07 100644 --- a/tm2py/components/demand/__init__.py +++ b/tm2py/components/demand/__init__.py @@ -1 +1 @@ -"""Demand components module""" +"""Demand components module.""" diff --git a/tm2py/components/demand/air_passenger.py b/tm2py/components/demand/air_passenger.py index e69de29b..b6ae5543 100644 --- a/tm2py/components/demand/air_passenger.py +++ b/tm2py/components/demand/air_passenger.py @@ -0,0 +1,286 @@ +"""Module containing the AirPassenger class which builds the airport trip matrices.""" + + +from __future__ import annotations + +import itertools +import os +from typing import TYPE_CHECKING + +import numpy as np +import openmatrix as _omx +import pandas as pd + +from tm2py.components.component import Component +from tm2py.logger import LogStartEnd +from tm2py.omx import df_to_omx +from tm2py.tools import interpolate_dfs + +if TYPE_CHECKING: + from tm2py.controller import RunController + + +class AirPassenger(Component): + """Builds the airport trip matrices. + + input: nonres/{year}_{tofrom}{airport}.csv + output: five time-of-day-specific OMX files with matrices DA, SR2, SR3 + + Notes: + These are independent of level-of-service. + + Note that the reference names, years, file paths and other key details + are controlled via the config, air_passenger section. See the + AirPassengerConfig doc for details on specifying these inputs. + + The following details are based on the default config values. + + Creates air passenger vehicle trip tables for the Bay Area's three major + airports, namely SFO, OAK, and SJC. Geoff Gosling, a consultant, created + vehicle trip tables segmented by time of day, travel mode, and access/egress + direction (i.e. to the airport or from the airport) for years 2007 and 2035. + The tables are based on a 2006 Air Passenger survey, which was conducted + at SFO and OAK (but not SJC). + + The travel modes are as follows: + (a) escort (drive alone, shared ride 2, and shared ride 3+) + (b) park (da, sr2, & sr3+) + (c) rental car (da, sr2, & sr3+) + (d) taxi ((da, sr2, & sr3+) + (e) limo (da, sr2, & sr3+) + (f) shared ride van (all assumed to be sr3); + (g) hotel shuttle (all assumed to be sr3); and, + (h) charter bus (all assumed to be sr3). + + The shared ride van, hotel shuttle, and charter bus modes are assumed to + have no deadhead travel. The return escort trip is included, as are the + deadhead limo and taxi trips. + + The scripts reads in csv files adapted from Mr. Gosling's Excel files, + and creates a highway-assignment ready OMX matrix file for each time-of-day + interval. + + Assumes that no air passengers use HOT lanes (probably not exactly true + in certain future year scenarios, but the assumption is made here as a + simplification). Simple linear interpolations are used to estimate vehicle + demand in years other than 2007 and 2035, including 2015, 2020, 2025, 2030, + and 2040. + + Transit travel to the airports is not included in these vehicle trip tables. + + Input: + Year-, access/egress-, and airport-specific database file with 90 columns + of data for each TAZ. 
There are 18 columns for each time-of-day interval + as follows: + (1) Escort, drive alone + (2) Escort, shared ride 2 + (3) Escort, shared ride 3+ + (4) Park, drive alone + (5) Park, shared ride 2 + (6) Park, shared ride 3+ + (7) Rental car, drive alone + (8) Rental car, shared ride 2 + (9) Rental car, shared ride 3+ + (10) Taxi, drive alone + (11) Taxi, shared ride 2 + (12) Taxi, shared ride 3+ + (13) Limo, drive alone + (14) Limo, shared ride 2 + (15) Limo, shared ride 3+ + (16) Shared ride van, shared ride 3+ + (17) Hotel shuttle, shared ride 3+ + (18) Charter bus, shared ride 3+ + + Output: + Five time-of-day-specific tables, each containing origin/destination vehicle + matrices for the following modes: + (1) drive alone (DA) + (2) shared ride 2 (SR2) + (3) shared ride 3+ (SR3) + + Internal properties: + _start_year + _end_year + _mode_groups: + _out_names: + """ + + def __init__(self, controller: RunController): + """Build the airport trip matrices. + + Args: + controller: parent Controller object + """ + super().__init__(controller) + + self.config = self.controller.config.air_passenger + + self.start_year = self.config.reference_start_year + self.end_year = self.config.reference_end_year + self.scenario_year = self.controller.config.scenario.year + + self.airports = self.controller.config.air_passenger.airport_names + + self._demand_classes = None + self._access_mode_groups = None + self._class_modes = None + + @property + def classes(self): + return [c.name for c in self.config.demand_aggregation] + + @property + def demand_classes(self): + if not self._demand_classes: + self._demand_classes = {c.name: c for c in self.config.demand_aggregation} + return self._demand_classes + + @property + def access_mode_groups(self): + if not self._access_mode_groups: + self._access_mode_groups = { + c_name: c.access_modes for c_name, c in self.demand_classes.items() + } + return self._access_mode_groups + + @property + def class_modes(self): + if self._class_modes is None: + self._class_modes = { + c_name: c.mode for c_name, c in self.demand_classes.items() + } + return self._class_modes + + def validate_inputs(self): + """Validate the inputs.""" + # TODO + pass + + @LogStartEnd() + def run(self): + """Run the Air Passenger Demand model to generate the demand matrices. + + Steps: + 1. Load the demand data from the CSV files. + 2. Aggregate the demand data into the assignable classes. + 3. Create the demand matrices be interpolating the demand data. + 4. Write the demand matrices to OMX files. + """ + + input_demand = self._load_air_pax_demand() + aggr_demand = self._aggregate_demand(input_demand) + + demand = interpolate_dfs( + aggr_demand, + [self.start_year, self.end_year], + self.scenario_year, + ) + self._export_result(demand) + + def _load_air_pax_demand(self) -> pd.DataFrame: + """Loads demand from the CSV files into single pandas dataframe. 
+
+        Uses the following configs to determine the input file names and paths:
+        - self.config.input_demand_folder
+        - self.config.airport_names
+        - self.config.reference_start_year
+        - self.config.reference_end_year
+
+        Using the filename pattern from self.config.input_demand_filename_tmpl,
+        e.g. f"{year}_{direction}{airport}.csv"
+
+        Returns: pandas dataframe with the following columns:
+            (1) airport
+            (2) time_of_day
+            (3) access_mode
+            (4) demand
+        """
+
+        _start_demand_df = self._get_air_demand_for_year(self.start_year)
+        _end_demand_df = self._get_air_demand_for_year(self.end_year)
+
+        _air_pax_demand_df = pd.merge(
+            _start_demand_df,
+            _end_demand_df,
+            how="outer",
+            suffixes=(f"_{self.start_year}", f"_{self.end_year}"),
+            on=["ORIG", "DEST"],
+        )
+
+        _grouped_air_pax_demand_df = _air_pax_demand_df.groupby(["ORIG", "DEST"]).sum()
+        return _grouped_air_pax_demand_df
+
+    def _input_demand_filename(self, airport, year, direction):
+        """Return the absolute path of the demand CSV for an airport/year/direction combo."""
+        _file_name = self.config.input_demand_filename_tmpl.format(
+            airport=airport, year=year, direction=direction
+        )
+
+        return os.path.join(
+            self.get_abs_path(self.config.input_demand_folder), _file_name
+        )
+
+    def _get_air_demand_for_year(self, year) -> pd.DataFrame:
+        """Creates a dataframe of concatenated data from CSVs for all airport x direction combos.
+
+        Args:
+            year (str): year of demand
+
+        Returns:
+            pd.DataFrame: concatenation of all CSVs that were read in as a dataframe
+        """
+        _airport_direction = itertools.product(
+            self.airports,
+            ["to", "from"],
+        )
+        demand_df = None
+        for airport, direction in _airport_direction:
+            _df = pd.read_csv(self._input_demand_filename(airport, year, direction))
+            if demand_df is not None:
+                demand_df = pd.concat([demand_df, _df])
+            else:
+                demand_df = _df
+
+        return demand_df
+
+    def _aggregate_demand(self, input_demand: pd.DataFrame) -> pd.DataFrame:
+        """Aggregate demand across access modes to assignable classes for each year.
+ + Args: + input_demand: pandas dataframe with the columns for each combo of: + {_period}_{_access}_{_group}_{_year} + """ + aggr_demand = pd.DataFrame() + + _year_tp_group_accessmode = itertools.product( + [self.start_year, self.end_year], + self.time_period_names, + self.access_mode_groups.items(), + ) + + # TODO This should be done entirely in pandas using group-by + for _year, _period, (_class, _access_modes) in _year_tp_group_accessmode: + data = input_demand[ + [f"{_period}_{_access}_{_class}_{_year}" for _access in _access_modes] + ] + aggr_demand[f"{_period}_{_class}_{_year}"] = data.sum(axis=1) + + return aggr_demand + + def _export_result(self, demand_df: pd.DataFrame): + """Export resulting model year demand to OMX files by period.""" + path_tmplt = self.get_abs_path(self.config.output_trip_table_directory) + os.makedirs(os.path.dirname(path_tmplt), exist_ok=True) + + for _period in self.time_period_names: + _file_path = os.path.join( + path_tmplt, self.config.outfile_trip_table_tmp.format(period=_period) + ) + df_to_omx( + demand_df, + { + _mode: f"{_period}_{_class}" + for _class, _mode in self.class_modes.items() + }, + _file_path, + orig_column="ORIG", + dest_column="DEST", + ) diff --git a/tm2py/components/demand/commercial.py b/tm2py/components/demand/commercial.py index e69de29b..17856422 100644 --- a/tm2py/components/demand/commercial.py +++ b/tm2py/components/demand/commercial.py @@ -0,0 +1,927 @@ +"""Commercial vehicle / truck model module.""" + +from __future__ import annotations + +import itertools +import os +from collections import defaultdict +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +import numpy as np +import pandas as pd + +from tm2py.components.component import Component, Subcomponent +from tm2py.components.demand.toll_choice import TollChoiceCalculator +from tm2py.components.network.skims import get_blended_skim +from tm2py.emme.matrix import MatrixCache, OMXManager +from tm2py.logger import LogStartEnd +from tm2py.tools import zonal_csv_to_matrices + +if TYPE_CHECKING: + from tm2py.controller import RunController + +NumpyArray = np.array + + +# mployment category mappings, grouping into larger categories +_land_use_aggregation = { + "AGREMPN": ["ag"], + "RETEMPN": ["ret_loc", "ret_reg"], + "FPSEMPN": ["fire", "info", "lease", "prof", "serv_bus"], + "HEREMPN": [ + "art_rec", + "eat", + "ed_high", + "ed_k12", + "ed_oth", + "health", + "hotel", + "serv_pers", + "serv_soc", + ], + "MWTEMPN": [ + "logis", + "man_bio", + "man_hvy", + "man_lgt", + "man_tech", + "natres", + "transp", + "util", + ], + "OTHEMPN": ["constr", "gov"], + "TOTEMP": ["emp_total"], + "TOTHH": ["HH"], +} + + +class CommercialVehicleModel(Component): + """Commercial Vehicle demand model. + + Generates truck demand matrices from: + - land use + - highway network impedances + - parameters + + Segmented into four truck types: + (1) very small trucks (two-axle, four-tire), + (2) small trucks (two-axle, six-tire), + (3) medium trucks (three-axle), + (4) large or combination (four or more axle) trucks. + + Input: (1) MAZ csv data file with the employment and household counts. + (2) Skims + (3) K-Factors + (4) + Output: + + Notes: + (1) Based on the BAYCAST truck model, no significant updates. + (2) Combined Chuck's calibration adjustments into the NAICS-based model coefficients. + """ + + def __init__(self, controller: RunController): + """Constructor for the CommercialVehicleTripGeneration component. + + Args: + controller (RunController): Run controller for model run. 
+ """ + super().__init__(controller) + + self.config = self.controller.config.truck + self.sub_components = { + "trip generation": CommercialVehicleTripGeneration(controller, self), + "trip distribution": CommercialVehicleTripDistribution(controller, self), + "time of day": CommercialVehicleTimeOfDay(controller, self), + "toll choice": CommercialVehicleTollChoice(controller, self), + } + + self.trk_impedances = {imp.name: imp for imp in self.config.impedances} + + # Emme matrix management (lazily evaluated) + self._matrix_cache = None + + # Interim Results + self.total_tripends_df = None + self.daily_demand_dict = None + self.trkclass_tp_demand_dict = None + self.trkclass_tp_toll_demand_dict = None + + @property + def purposes(self): + return list( + set([trk_class.purpose for trk_class in self.config.trip_gen.classes]) + ) + + @property + def classes(self): + return [trk_class.name for trk_class in self.config.classes] + + def validate_inputs(self): + """Validate the inputs.""" + # TODO + + @LogStartEnd() + def run(self): + """Run commercial vehicle model.""" + self.total_tripends_df = self.sub_components["trip generation"].run() + self.daily_demand_dict = self.sub_components["trip distribution"].run( + self.total_tripends_df + ) + self.trkclass_tp_demand_dict = self.sub_components["time of day"].run( + self.daily_demand_dict + ) + self.trkclass_tp_toll_demand_dict = self.sub_components["toll choice"].run( + self.trkclass_tp_demand_dict + ) + self._export_results_as_omx(self.trkclass_tp_toll_demand_dict) + + @property + def emmebank(self): + """Reference to highway assignment Emmebank. + + TODO + This should really be in the controller? + Or part of network.skims? + """ + self._emmebank = self.controller.emme_manager.highway_emmebank + return self._emmebank + + @property + def emme_scenario(self): + """Return emme scenario from emmebank. + + Use first valid scenario for reference Zone IDs. + + TODO + This should really be in the controller? + Or part of network.skims? + """ + _ref_scenario_name = self.controller.config.time_periods[0].name + return self.emmebank.scenario(_ref_scenario_name) + + @property + def matrix_cache(self): + """Access to MatrixCache to Emmebank for given emme_scenario.""" + if self._matrix_cache is None: + self._matrix_cache = MatrixCache(self.emme_scenario) + return self._matrix_cache + + @LogStartEnd(level="DEBUG") + def _export_results_as_omx(self, class_demand): + """Export assignable class demands to OMX files by time-of-day.""" + outdir = self.get_abs_path(self.config.output_trip_table_directory) + os.makedirs(os.path.dirname(outdir), exist_ok=True) + for period, matrices in class_demand.items(): + with OMXManager( + os.path.join( + outdir, self.config.outfile_trip_table_tmp.format(period=period) + ), + "w", + ) as output_file: + for name, data in matrices.items(): + output_file.write_array(data, name) + + +class CommercialVehicleTripGeneration(Subcomponent): + """Commercial vehicle (truck) Trip Generation for 4 sizes of truck. + + The four truck types are: + (1) very small trucks (two-axle, four-tire), + (2) small trucks (two-axle, six-tire), + (3) medium trucks (three-axle), + (4) large or combination (four or more axle) trucks. + + Input: (1) MAZ csv data file with the employment and household counts. + Ouput: Trips by 4 truck sizes + + Trip generation + --------------- + Use linear regression models to generate trip ends, + balancing attractions to productions. Based on BAYCAST truck model. 
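Concretely, the regression form described here is applied per zone in `_generate_trip_ends` further below: land-use quantities are multiplied by per-category rates, scaled and offset, then summed across categories. A small numeric sketch of that shape of computation (rates and land-use values invented for illustration):

```python
# Mirrors landuse_df.mul(rates) * multiplier + constant, summed on
# axis=1: trip ends per zone from land-use inputs.
import pandas as pd

landuse_df = pd.DataFrame({"TOTHH": [100.0, 50.0], "TOTEMP": [200.0, 80.0]})
rates = pd.Series({"TOTHH": 0.3, "TOTEMP": 0.1})
multiplier, constant = 1.0, 0.0

trips = (landuse_df.mul(rates) * multiplier + constant).sum(axis=1).round()
print(trips.tolist())  # [50.0, 23.0]
```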
+ + The truck trip generation models for small trucks (two-axle, six tire), + medium trucks (three-axle), and large or combination (four or more axle) + trucks are taken directly from the study: "I-880 Intermodal Corridor Study: + Truck Travel in the San Francisco Bay Area", prepared by Barton Aschman in + December 1992. The coefficients are on page 223 of this report. + + The very small truck generation model is based on the Phoenix four-tire + truck model documented in the TMIP Quick Response Freight Manual. + + Note that certain production models previously used SIC-based employment + categories. To both maintain consistency with the BAYCAST truck model and + update the model to use NAICS-based employment categories, new regression + models were estimated relating the NAICS-based employment data with the + SIC-based-predicted trips. The goal here is not to create a new truck + model, but to mimic the old model with the available data. Please see + the excel spreadsheet TruckModel.xlsx for details. The NAICS-based model + results replicate the SIC-based model results quite well. + """ + + def __init__(self, controller: RunController, component: Component): + """Constructor for the CommercialVehicleTripGeneration component. + + Args: + controller (RunController): Run controller for model run. + component (Component): Parent component of sub-component + """ + super().__init__(controller, component) + self.config = self.component.config.trip_gen + + def validate_inputs(self): + """Validate the inputs.""" + # TODO + pass + + @LogStartEnd() + def run(self): + """Run commercial vehicle trip distribution.""" + _landuse_df = self._aggregate_landuse() + _unbalanced_tripends_df = self._generate_trip_ends(_landuse_df) + _balanced_tripends_df = self._balance_pa(_unbalanced_tripends_df) + total_tripends_df = self._aggregate_by_class(_balanced_tripends_df) + return total_tripends_df + + @LogStartEnd(level="DEBUG") + def _aggregate_landuse(self) -> pd.DataFrame: + """Aggregates landuse data from input CSV by MAZ to TAZ and employment groups. + + TOTEMP, total employment (same regardless of classification system) + RETEMPN, retail trade employment per the NAICS classification system + FPSEMPN, financial and professional services employment per NAICS + HEREMPN, health, educational, and recreational employment per NAICS + OTHEMPN, other employment per the NAICS classification system + AGREMPN, agricultural employment per the NAICS classificatin system + MWTEMPN, manufacturing, warehousing, and transportation employment per NAICS + TOTHH, total households + """ + maz_data_file = self.get_abs_path( + self.controller.config.scenario.maz_landuse_file + ) + maz_input_data = pd.read_csv(maz_data_file) + zones = self.component.emme_scenario.zone_numbers + maz_input_data = maz_input_data[maz_input_data["TAZ_ORIGINAL"].isin(zones)] + taz_input_data = maz_input_data.groupby(["TAZ_ORIGINAL"]).sum() + taz_input_data = taz_input_data.sort_values(by="TAZ_ORIGINAL") + # combine categories + taz_landuse = pd.DataFrame() + for total_column, sub_categories in _land_use_aggregation.items(): + taz_landuse[total_column] = taz_input_data[sub_categories].sum(axis=1) + taz_landuse.reset_index(inplace=True) + return taz_landuse + + @LogStartEnd(level="DEBUG") + def _generate_trip_ends(self, landuse_df: pd.DataFrame) -> pd.DataFrame: + """Generate productions and attractions by class based on landuse and truck trip rates. + + Args: + landuse_df (pd.DataFrame): DataFrame with aggregated landuse data. 
+ Expected columns for landuse are: AGREMPN, RETEMPN, FPSEMPN, HEREMPN, + MWTEMPN, OTHEMPN, TOTEMP, TOTHH + + Returns: + pd.DataFrame: DataFrame with unbalanced production and attraction trip ends. + """ + tripends_df = pd.DataFrame() + + _class_pa = itertools.product( + self.config.classes, + ["production_formula", "attraction_formula"], + ) + + # TODO Do this with multi-indexing rather than relying on column naming + + for _c, _pa in _class_pa: + + _trip_type = _c.purpose + _trk_class = _c.name + + if _pa.endswith("_formula"): + _pa_short = _pa.split("_")[0] + + # linked trips (non-garage-based) - attractions (equal productions) + if (_trip_type == "linked") & (_pa_short == "attraction"): + tripends_df[f"{_trip_type}_{_trk_class}_{_pa_short}s"] = tripends_df[ + f"{_trip_type}_{_trk_class}_productions" + ] + else: + _constant = _c[_pa].constant + _multiplier = _c[_pa].multiplier + + land_use_rates = pd.DataFrame(_c[_pa].land_use_rates).T + land_use_rates = land_use_rates.rename( + columns=land_use_rates.loc["property"] + ).drop("property", axis=0) + + _rate_trips_df = landuse_df.mul(land_use_rates.iloc[0]) + _trips_df = _rate_trips_df * _multiplier + _constant + + tripends_df[f"{_trip_type}_{_trk_class}_{_pa_short}s"] = _trips_df.sum( + axis=1 + ).round() + + return tripends_df + + @LogStartEnd(level="DEBUG") + def _balance_pa(self, tripends_df: pd.DataFrame) -> pd.DataFrame: + """Balance production and attractions. + + Args: + tripends_df (pd.DataFrame): DataFrame with unbalanced production and attraction + trip ends. + + Returns: + pd.DataFrame: DataFrame with balanced production and attraction trip ends. + """ + + for _c in self.config.classes: + _trip_type = _c.purpose + _trk_class = _c.name + _balance_to = _c.balance_to + + _tots = { + "attractions": tripends_df[ + f"{_trip_type}_{_trk_class}_attractions" + ].sum(), + "productions": tripends_df[ + f"{_trip_type}_{_trk_class}_productions" + ].sum(), + } + + # if productions OR attractions are zero, fill one with other + if not _tots["attractions"]: + tripends_df[f"{_trip_type}_{_trk_class}_attractions"] = tripends_df[ + f"{_trip_type}_{_trk_class}_productions" + ] + + elif not _tots["productions"]: + tripends_df[f"{_trip_type}_{_trk_class}_productions"] = tripends_df[ + f"{_trip_type}_{_trk_class}_attractions" + ] + + # otherwise balance based on sums + elif _balance_to == "productions": + tripends_df[f"{_trip_type}_{_trk_class}_attractions"] = tripends_df[ + f"{_trip_type}_{_trk_class}_attractions" + ] * (_tots["productions"] / _tots["attractions"]) + + elif _balance_to == "attractions": + tripends_df[f"{_trip_type}_{_trk_class}_productions"] = tripends_df[ + f"{_trip_type}_{_trk_class}_productions" + ] * (_tots["attractions"] / _tots["productions"]) + else: + raise ValueError(f"{_balance_to} is not a valid balance_to value") + return tripends_df + + @LogStartEnd(level="DEBUG") + def _aggregate_by_class(self, tripends_df: pd.DataFrame) -> pd.DataFrame: + """Sum tripends by class across trip purpose. + + Args: + tripends_df (pd.DataFrame): DataFrame with balanced production and attraction + + Returns: + pd.DataFrame: DataFrame with aggregated tripends by truck class. 
Returned columns are: + vsmtrk_prod, vsmtrk_attr, + smltrk_prod, smltrk_attr, + medtrk_prod, medtrk_attr, + lrgtrk_prod, lrgtrk_attr + """ + agg_tripends_df = pd.DataFrame() + + _class_pa = itertools.product( + self.component.classes, + ["productions", "attractions"], + ) + + for _trk_class, _pa in _class_pa: + _sum_cols = [ + c for c in tripends_df.columns if c.endswith(f"_{_trk_class}_{_pa}") + ] + agg_tripends_df[f"{_trk_class}_{_pa}"] = pd.Series( + tripends_df[_sum_cols].sum(axis=1) + ) + + agg_tripends_df.round(decimals=7) + + self.logger.log(agg_tripends_df.describe().to_string(), level="DEBUG") + + return agg_tripends_df + + +class CommercialVehicleTripDistribution(Subcomponent): + """Commercial vehicle (truck) Trip Distribution for 4 sizes of truck. + + The four truck types are: + (1) very small trucks (two-axle, four-tire), + (2) small trucks (two-axle, six-tire), + (3) medium trucks (three-axle), + (4) large or combination (four or more axle) trucks. + + Input: (1) Trips by 4 truck sizes + (2) highway skims for truck, time, distance, bridgetoll and value toll + (3) friction factors lookup table + (4) k-factors matrix + Ouput: Trips origin and destination matrices by 4 truck sizes + + A simple gravity model is used to distribute the truck trips, with + separate friction factors used for each class of truck. + + A blended travel time is used as the impedance measure, specifically the weighted average + of the AM travel time (one-third weight) and the midday travel time (two-thirds weight). + + Input: + Level-of-service matrices for the AM peak period (6 am to 10 am) and midday + period (10 am to 3 pm) which contain truck-class specific estimates of + congested travel time (in minutes) + + A matrix of k-factors, as calibrated by Chuck Purvis. Note the very small truck model + does not use k-factors; the small, medium, and large trucks use the same k-factors. + + A table of friction factors in text format with the following fields, space separated: + - impedance measure (blended travel time); + - friction factors for very small trucks; + - friction factors for small trucks; + - friction factors for medium trucks; and, + - friction factors for large trucks. + + Notes on distribution steps: + load nonres/truck_kfactors_taz.csv + load nonres/truckFF.dat + Apply friction factors and kfactors to produce balancing matrix + apply the gravity models using friction factors from nonres/truckFF.dat + (note the very small trucks do not use the K-factors) + Can use Emme matrix balancing for this - important note: reference + matrices by name and ensure names are unique + Trips rounded to 0.01, causes some instability in results + + Results: four total daily trips by truck type + + Notes: + (1) Based on the BAYCAST truck model, no significant updates. + (2) Combined Chuck's calibration adjustments into the NAICS-based model coefficients. + + """ + + def __init__(self, controller: RunController, component: Component): + """Constructor for the CommercialVehicleTripDistribution component. + + Args: + controller (RunController): Run controller for model run. 
+        component (Component): Parent component of sub-component
+        """
+        super().__init__(controller, component)
+
+        self.config = self.component.config.trip_dist
+        self._k_factors = None
+        self._blended_skims = {}
+        self._friction_factors = None
+        self._friction_factor_matrices = {}
+
+        self._class_config = None
+
+    @property
+    def class_config(self):
+        """Mapping of truck class name to its trip distribution config entry."""
+        if not self._class_config:
+            self._class_config = {c.name: c for c in self.config.classes}
+
+        return self._class_config
+
+    @property
+    def k_factors(self):
+        """Zone-to-zone values of truck K factors.
+
+        Returns:
+            NumpyArray: Zone-to-zone values of truck K factors.
+        """
+        if self._k_factors is None:
+            self._k_factors = self._load_k_factors()
+        return self._k_factors
+
+    def _load_k_factors(self):
+        """Loads k-factors from self.config.k_factors_file csv file.
+
+        Returns:
+            NumpyArray: Zone-to-zone values of truck K factors.
+        """
+        # Disabled alternative implementation using zonal_csv_to_matrices:
+        # return zonal_csv_to_matrices(
+        #     self.get_abs_path(self.config.k_factors_file),
+        #     i_column="I_taz_tm2_v2_2",
+        #     j_column="J_taz_tm2_v2_2",
+        #     value_columns="truck_k",
+        #     fill_zones=True,
+        #     default_value=0,
+        #     max_zone=max(self.component.emme_scenario.zone_numbers),
+        # )["truck_k"].values
+        data = pd.read_csv(self.get_abs_path(self.config.k_factors_file))
+        zones = np.unique(data["I_taz_tm2_v2_2"])
+        num_data_zones = len(zones)
+        row_index = np.searchsorted(zones, data["I_taz_tm2_v2_2"])
+        col_index = np.searchsorted(zones, data["J_taz_tm2_v2_2"])
+        k_factors = np.zeros((num_data_zones, num_data_zones))
+        k_factors[row_index, col_index] = data["truck_k"]
+        num_zones = len(self.component.emme_scenario.zone_numbers)
+        padding = ((0, num_zones - num_data_zones), (0, num_zones - num_data_zones))
+        k_factors = np.pad(k_factors, padding)
+
+        return k_factors
+
+    def blended_skims(self, mode: str):
+        """Get a blended skim, creating it if it doesn't already exist.
+
+        Args:
+            mode (str): Mode for skim
+
+        Returns:
+            NumpyArray: Zone-to-zone blended skim values for the mode.
+        """
+        if mode not in self._blended_skims:
+            self._blended_skims[mode] = get_blended_skim(
+                self.controller,
+                mode=mode,
+                blend=self.component.trk_impedances[mode]["time_blend"],
+            )
+        return self._blended_skims[mode]
+
+    def friction_factor_matrices(
+        self, trk_class: str, k_factors: Union[None, NumpyArray] = None
+    ) -> NumpyArray:
+        """Zone to zone NumpyArray of impedances for a given truck class.
+
+        Args:
+            trk_class (str): Truck class abbreviated name
+            k_factors (Union[None, NumpyArray]): If not None, gives a zone-by-zone array of
+                k-factors: multiplicative adjustments applied to the friction factors.
+                Defaults to None.
+
+        Returns:
+            NumpyArray: Zone-by-zone matrix of friction factors
+        """
+        if trk_class not in self._friction_factor_matrices.keys():
+            self._friction_factor_matrices[
+                trk_class
+            ] = self._calculate_friction_factor_matrix(
+                trk_class,
+                self.class_config[trk_class].impedance,
+                self.k_factors,
+                self.class_config[trk_class].use_k_factors,
+            )
+
+        return self._friction_factor_matrices[trk_class]
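`_calculate_friction_factor_matrix` below is essentially a table lookup with linear interpolation: each blended zone-to-zone time is mapped onto the truck class's friction-factor curve. A toy check of the `np.interp` call it relies on (times and factors invented for illustration):

```python
# np.interp(x, xp, fp) linearly interpolates the sample points
# (xp, fp) at x, preserving x's shape, so a skim of times becomes a
# matrix of friction factors; values beyond the table are clamped.
import numpy as np

times = np.array([[5.0, 15.0], [25.0, 40.0]])  # blended skim, minutes
ff_time = [0.0, 10.0, 20.0, 30.0]              # lookup-table time bands
ff_vals = [100.0, 80.0, 40.0, 10.0]            # friction factors

print(np.interp(times, ff_time, ff_vals))
# [[90. 60.]
#  [25. 10.]]
```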
+        """
+        _friction_matrix = np.interp(
+            self.blended_skims(blended_skim_name),
+            self.friction_factors["time"].tolist(),
+            self.friction_factors[segment_name],
+        )
+
+        if use_k_factors:
+            if k_factors is not None:
+                _friction_matrix = _friction_matrix * k_factors
+
+        return _friction_matrix
+
+    @property
+    def friction_factors(self):
+        """Table of friction factors for each time band by truck class.
+
+        Returns:
+            pd.DataFrame: DataFrame of friction factors read from disk.
+        """
+        if self._friction_factors is None:
+            self._friction_factors = self._read_ffactors()
+        return self._friction_factors
+
+    def _read_ffactors(self) -> pd.DataFrame:
+        """Load friction factors lookup tables from csv file to dataframe.
+
+        Reads from file: config.truck.friction_factors_file with following assumed column order:
+            time: Time
+            vsmtrk: Very Small Truck FF
+            smltrk: Small Truck FF
+            medtrk: Medium Truck FF
+            lrgtrk: Large Truck FF
+        """
+        _file_path = self.get_abs_path(self.config.friction_factors_file)
+        return pd.read_csv(_file_path)
+
+    def validate_inputs(self):
+        """Validate the inputs."""
+        # TODO
+        pass
+
+    @LogStartEnd()
+    def run(self, tripends_df) -> Dict[str, NumpyArray]:
+        """Run commercial vehicle trip distribution."""
+        daily_demand_dict = {
+            tc: self._distribute_ods(tripends_df, tc) for tc in self.component.classes
+        }
+
+        return daily_demand_dict
+
+    @LogStartEnd(level="DEBUG")
+    def _distribute_ods(
+        self,
+        tripends_df: pd.DataFrame,
+        trk_class: str,
+        orig_factor: float = 0.5,
+        dest_factor: float = 0.5,
+    ) -> NumpyArray:
+        """Distribute trip ends for a given truck class.
+
+        Args:
+            tripends_df: dataframe with trip ends in "{trk_class}_productions" and
+                "{trk_class}_attractions" columns.
+            trk_class: name of truck class to distribute.
+            orig_factor (float, optional): Amount to factor towards origins. Defaults to 0.5.
+            dest_factor (float, optional): Amount to factor towards destinations. Defaults to 0.5.
+
+        Returns:
+            NumpyArray: Distributed trips for given truck class
+        """
+        if orig_factor + dest_factor != 1.0:
+            raise ValueError(
+                f"orig_factor ({orig_factor}) and dest_factor ({dest_factor}) must "
+                "sum to 1.0"
+            )
+
+        _prod_attr_matrix = self._matrix_balancing(
+            tripends_df[f"{trk_class}_productions"].to_numpy(),
+            tripends_df[f"{trk_class}_attractions"].to_numpy(),
+            trk_class,
+        )
+        daily_demand = (
+            orig_factor * _prod_attr_matrix
+            + dest_factor * _prod_attr_matrix.transpose()
+        )
+
+        self.logger.log(
+            f"{trk_class}, prod sum: {_prod_attr_matrix.sum()}, "
+            f"daily sum: {daily_demand.sum()}",
+            level="DEBUG",
+        )
+
+        return daily_demand
+
+    def _matrix_balancing(
+        self,
+        orig_totals: NumpyArray,
+        dest_totals: NumpyArray,
+        trk_class: str,
+    ) -> NumpyArray:
+        """Distribute origins and destinations based on friction factors for a given truck class.
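+
+        The balancing itself is delegated to Emme's matrix balancing tool;
+        conceptually it applies iterative proportional row/column scaling (IPF)
+        to the friction-factor matrix until row sums match orig_totals and
+        column sums match dest_totals.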
+
+        Args:
+            orig_totals: Total demand for origins as a numpy array
+            dest_totals: Total demand for destinations as a numpy array
+            trk_class (str): Truck class name
+
+        Returns:
+            NumpyArray: Balanced zone-by-zone trip matrix for the truck class
+        """
+        matrix_balancing = self.controller.emme_manager.modeller.tool(
+            "inro.emme.matrix_calculation.matrix_balancing"
+        )
+        matrix_round = self.controller.emme_manager.modeller.tool(
+            "inro.emme.matrix_calculation.matrix_controlled_rounding"
+        )
+
+        # Transfer numpy to emmebank
+        _ff_emme_mx_name = self.component.matrix_cache.set_data(
+            f"{trk_class}_friction",
+            self.friction_factor_matrices(trk_class),
+            matrix_type="FULL",
+        ).name
+
+        _orig_tots_emme_mx_name = self.component.matrix_cache.set_data(
+            f"{trk_class}_prod", orig_totals, matrix_type="ORIGIN"
+        ).name
+
+        _dest_tots_emme_mx_name = self.component.matrix_cache.set_data(
+            f"{trk_class}_attr", dest_totals, matrix_type="DESTINATION"
+        ).name
+
+        # Create a result matrix in the Emmebank to hold the balanced output
+        _result_emme_mx_name = self.component.matrix_cache.get_or_init_matrix(
+            f"{trk_class}_daily_demand"
+        ).name
+
+        spec = {
+            "od_values_to_balance": _ff_emme_mx_name,
+            "origin_totals": _orig_tots_emme_mx_name,
+            "destination_totals": _dest_tots_emme_mx_name,
+            "allowable_difference": 0.01,
+            "max_relative_error": self.config.max_balance_relative_error,
+            "max_iterations": self.config.max_balance_iterations,
+            "results": {"od_balanced_values": _result_emme_mx_name},
+            "performance_settings": {
+                "allowed_memory": None,
+                "number_of_processors": self.controller.num_processors,
+            },
+            "type": "MATRIX_BALANCING",
+        }
+        matrix_balancing(spec, scenario=self.component.emme_scenario)
+
+        matrix_round(
+            _result_emme_mx_name,
+            _result_emme_mx_name,
+            min_demand=0.01,
+            values_to_round="ALL_NON_ZERO",
+            scenario=self.component.emme_scenario,
+        )
+
+        return self.component.matrix_cache.get_data(_result_emme_mx_name)
+
+
+class CommercialVehicleTimeOfDay(Subcomponent):
+    """Commercial vehicle (truck) Time of Day Split for 4 sizes of truck.
+
+    Input:  Trip origin and destination matrices by 4 truck sizes
+    Output: 20 trip origin and destination matrices: 4 truck sizes by 5 time periods
+
+    Note:
+        The diurnal factors are taken from the BAYCAST-90 model with adjustments made
+        during calibration to the very small truck values to better match counts.
+    """
+
+    def __init__(self, controller: RunController, component: Component):
+        """Constructor for the CommercialVehicleTimeOfDay component.
+
+        Args:
+            controller (RunController): Run controller for model run.
+            component (Component): Parent component of sub-component
+        """
+        super().__init__(controller, component)
+
+        self.config = self.component.config.time_of_day
+
+        self.split_factor = "od"
+        self._class_configs = None
+        self._class_period_splits = None
+
+    @property
+    def time_periods(self):
+        return self.controller.config.time_periods
+
+    @property
+    def classes(self):
+        return [trk_class.name for trk_class in self.config.classes]
+
+    @property
+    def class_configs(self):
+        if not self._class_configs:
+            self._class_configs = {c.name: c for c in self.config.classes}
+        return self._class_configs
+
+    @property
+    def class_period_splits(self):
+        """Returns a dictionary of split fractions keyed by [truck class][time period]."""
+        if not self._class_period_splits:
+            self._class_period_splits = {
+                c_name: {c.time_period: c for c in config.time_period_split}
+                for c_name, config in self.class_configs.items()
+            }
+
+        return self._class_period_splits
+
+    def validate_inputs(self):
+        """Validate the inputs."""
+        # TODO
+        pass
+
+    @LogStartEnd()
+    def run(
+        self, daily_demand: Dict[str, NumpyArray]
+    ) -> Dict[str, Dict[str, NumpyArray]]:
+        """Splits the daily demand by time of day based on factors in the config.
+
+        Uses self.config.truck.classes.{class_name}.time_of_day_split to split the daily demand.
+
+        #TODO use TimePeriodSplit
+        Args:
+            daily_demand: dictionary of truck type name to numpy array of
+                truck type daily demand
+
+        Returns:
+            Nested dictionary of truck class: time period name => numpy array of demand
+        """
+        trkclass_tp_demand_dict = defaultdict(dict)
+
+        _class_timeperiod = itertools.product(self.classes, self.time_period_names)
+
+        for _t_class, _tp in _class_timeperiod:
+            trkclass_tp_demand_dict[_t_class][_tp] = np.around(
+                self.class_period_splits[_t_class][_tp.lower()][self.split_factor]
+                * daily_demand[_t_class],
+                decimals=2,
+            )
+
+        return trkclass_tp_demand_dict
+
+
+class CommercialVehicleTollChoice(Subcomponent):
+    """Commercial vehicle (truck) toll choice.
+
+    A binomial choice model for very small, small, medium, and large trucks.
+    A separate value-toll-paying versus no-value-toll-paying path choice
+    model is applied to each of the twenty time period and vehicle type combinations.
+
+    Input:  (1) Trip tables by time of day and truck class
+            (2) Skims providing the time and cost for value toll and non-value toll paths
+                for each; the matrix names in the OMX files are:
+                    "{period}_{cls_name}_time"
+                    "{period}_{cls_name}_dist"
+                    "{period}_{cls_name}_bridgetoll{grp_name}"
+                    "{period}_{cls_name}toll_time"
+                    "{period}_{cls_name}toll_dist"
+                    "{period}_{cls_name}toll_bridgetoll{grp_name}"
+                    "{period}_{cls_name}toll_valuetoll{grp_name}"
+                Where period is the assignment period, cls_name is the truck assignment
+                class name (as very small, small and medium truck are assigned as the
+                same class) and grp_name is the truck type name (as the tolls are
+                calculated separately for very small, small and medium).
+
+    Results: a total of forty demand matrices, by time of day, truck type and toll/non-toll.
+
+    Notes:  (1) TOLLCLASS is a code, 1 through 10 are reserved for bridges; 11 and up is
+                reserved for value toll facilities.
+            (2) All costs should be coded in year 2000 cents
+            (3) The 2-axle fee is used for very small trucks
+            (4) The 2-axle fee is used for small trucks
+            (5) The 3-axle fee is used for medium trucks
+            (6) The average of the 5-axle and 6-axle fee is used for large trucks
+                (about the midpoint of the fee schedule).
+            (7) The in-vehicle time coefficient is from the work trip mode choice model.
+    """
+
+    def __init__(self, controller, component):
+        """Constructor for Commercial Vehicle Toll Choice.
+
+        Also calls Subclass __init__().
+
+        Args:
+            controller: model run controller
+            component: parent component
+        """
+        super().__init__(controller, component)
+
+        self.config = self.component.config.toll_choice
+
+        self.sub_components = {
+            "toll choice calculator": TollChoiceCalculator(
+                controller,
+                self,
+                self.config,
+            ),
+        }
+
+        # shortcut
+        self._toll_choice = self.sub_components["toll choice calculator"]
+        self._toll_choice.toll_skim_suffix = "trk"
+
+    def validate_inputs(self):
+        """Validate the inputs."""
+        # TODO
+        pass
+
+    @LogStartEnd()
+    def run(self, trkclass_tp_demand_dict):
+        """Split per-period truck demands into nontoll and toll classes.
+
+        Uses OMX skims output from highway assignment: traffic_skims_{period}.omx
+        """
+        _tclass_time_combos = itertools.product(
+            self.time_period_names, self.config.classes
+        )
+
+        class_demands = defaultdict(dict)
+        for _time_period, _tclass in _tclass_time_combos:
+
+            _split_demand = self._toll_choice.run(
+                trkclass_tp_demand_dict[_tclass.name][_time_period],
+                _tclass.name,
+                _time_period,
+            )
+
+            class_demands[_time_period][_tclass.name] = _split_demand["non toll"]
+            class_demands[_time_period][f"{_tclass.name}toll"] = _split_demand["toll"]
+        return class_demands
diff --git a/tm2py/components/demand/household.py b/tm2py/components/demand/household.py
index e69de29b..d1bb0e47 100644
--- a/tm2py/components/demand/household.py
+++ b/tm2py/components/demand/household.py
@@ -0,0 +1,207 @@
+"""CT-RAMP related components for the household residents' travel model."""
+
+import shutil as _shutil
+
+import openmatrix as omx
+
+from tm2py.components.component import Component
+from tm2py.logger import LogStartEnd
+from tm2py.tools import run_process
+from tm2py.components.demand.prepare_demand import PrepareHighwayDemand
+
+
+class HouseholdModel(Component):
+    """Run household resident model."""
+
+    def validate_inputs(self):
+        """Validates inputs for component."""
+        pass
+
+    @LogStartEnd()
+    def run(self):
+        """Run the household resident travel demand model.
+
+        Steps:
+            1. Starts household manager.
+            2. Starts matrix manager.
+            3. Starts resident travel model (CTRAMP).
+            4. Cleans up CTRAMP java.
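+
+        Demand consolidation into per-period OMX files is then handled by
+        PrepareHighwayDemand.prepare_household_demand (via
+        _prepare_demand_for_assignment); the older _consolidate_demand_for_assign
+        path below is retained for reference only.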
+ """ + self.config = self.controller.config.household + self._start_household_manager() + self._start_matrix_manager() + self._run_resident_model() + self._stop_java() + #self._consolidate_demand_for_assign() + self._prepare_demand_for_assignment() + + def _prepare_demand_for_assignment(self): + prep_demand = PrepareHighwayDemand(self.controller) + prep_demand.prepare_household_demand() + + def _start_household_manager(self): + commands = [ + f"cd /d {self.controller.run_dir}", + f"CALL {self.controller.run_dir}\\CTRAMP\\runtime\\CTRampEnv.bat", + "set PATH=%CD%\\CTRAMP\\runtime;C:\\Windows\\System32;%JAVA_PATH%\\bin;" + "%TPP_PATH%;%PYTHON_PATH%;%PYTHON_PATH%\\condabin;%PYTHON_PATH%\\envs", + f"CALL {self.controller.run_dir}\\CTRAMP\\runtime\\runHhMgr.cmd %JAVA_PATH% %HOST_IP_ADDRESS%", + ] + run_process(commands, name="start_household_manager") + + def _start_matrix_manager(self): + commands = [ + f"cd /d {self.controller.run_dir}", + f"CALL {self.controller.run_dir}\\CTRAMP\\runtime\\CTRampEnv.bat", + "set PATH=%CD%\\CTRAMP\\runtime;C:\\Windows\\System32;%JAVA_PATH%\\bin;" + "%TPP_PATH%;%PYTHON_PATH%;%PYTHON_PATH%\\condabin;%PYTHON_PATH%\\envs", + f"CALL {self.controller.run_dir}\\CTRAMP\\runtime\\runMtxMgr.cmd %HOST_IP_ADDRESS% %JAVA_PATH%", + ] + run_process(commands, name="start_matrix_manager") + + def _run_resident_model(self): + sample_rate_iteration = {1: 0.05, 2: 0.5, 3: 1, 4: 0.02, 5: 0.02} + iteration = self.controller.iteration + sample_rate = sample_rate_iteration[iteration] + _shutil.copyfile("CTRAMP\\runtime\\mtctm2.properties", "mtctm2.properties") + commands = [ + f"cd /d {self.controller.run_dir}", + f"CALL {self.controller.run_dir}\\CTRAMP\\runtime\\CTRampEnv.bat", + "set PATH=%CD%\\CTRAMP\\runtime;C:\\Windows\\System32;%JAVA_PATH%\\bin;" + "%TPP_PATH%;%PYTHON_PATH%;%PYTHON_PATH%\\condabin;%PYTHON_PATH%\\envs", + f"CALL {self.controller.run_dir}\\CTRAMP\\runtime\\runMTCTM2ABM.cmd {sample_rate} {iteration} %JAVA_PATH%", + ] + run_process(commands, name="run_resident_model") + + @staticmethod + def _stop_java(): + run_process(['taskkill /im "java.exe" /F']) + + def _consolidate_demand_for_assign(self): + """ + CTRAMP writes out demands in separate omx files, e.g. + ctramp_output\\auto_@p@_SOV_GP_@p@.mat + ctramp_output\\auto_@p@_SOV_PAY_@p@.mat + ctramp_output\\auto_@p@_SR2_GP_@p@.mat + ctramp_output\\auto_@p@_SR2_HOV_@p@.mat + ctramp_output\\auto_@p@_SR2_PAY_@p@.mat + ctramp_output\\auto_@p@_SR3_GP_@p@.mat + ctramp_output\\auto_@p@_SR3_HOV_@p@.mat + ctramp_output\\auto_@p@_SR3_PAY_@p@.mat + ctramp_output\\Nonmotor_@p@_BIKE_@p@.mat + ctramp_output\\Nonmotor_@p@_WALK_@p@.mat + ctramp_output\\other_@p@_SCHLBUS_@p@.mat + + Need to combine demands for one period into one omx file. 
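+
+        (Kept for reference: run() currently calls _prepare_demand_for_assignment()
+        instead of this method.)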
+ """ + time_period_names = self.time_period_names + + # auto TAZ + for period in time_period_names: + output_path = ( + self.controller.get_abs_path(self.config.highway_demand_file) + .__str__() + .format(period=period, iter=self.controller.iteration) + ) + output_omx = omx.open_file(output_path, "w") + for mode_agg in self.config.mode_agg: + if mode_agg.name == "transit": + continue + for mode in mode_agg.modes: + input_path = ( + self.controller.get_abs_path( + self.config.highway_taz_ctramp_output_file + ) + .__str__() + .format(period=period, mode_agg=mode_agg.name, mode=mode) + ) + input_omx = omx.open_file(input_path, "r") + core_name = mode + "_" + period.upper() + output_omx[core_name] = input_omx[core_name][:, :] + input_omx.close() + + output_omx.close() + + # auto MAZ + for period in time_period_names: + for maz_group in [1, 2, 3]: + output_path = ( + self.controller.get_abs_path( + self.controller.config.highway.maz_to_maz.demand_file + ) + .__str__() + .format( + period=period, number=maz_group, iter=self.controller.iteration + ) + ) + + input_path = ( + self.controller.get_abs_path( + self.config.highway_maz_ctramp_output_file + ) + .__str__() + .format(period=period, number=maz_group) + ) + + _shutil.copyfile(input_path, output_path) + + # transit TAP + #for period in time_period_names: + # for set in ["set1", "set2", "set3"]: + # output_path = ( + # self.controller.get_abs_path(self.config.transit_demand_file) + # .__str__() + # .format(period=period, iter=self.controller.iteration, set=set) + # ) + # output_omx = omx.open_file(output_path, "w") + # for mode_agg in self.config.mode_agg: + # if mode_agg.name != "transit": + # continue + # for mode in mode_agg.modes: + # input_path = ( + # self.controller.get_abs_path( + # self.config.transit_tap_ctramp_output_file + # ) + # .__str__() + # .format( + # period=period, + # mode_agg=mode_agg.name, + # mode=mode, + # set=set, + # ) + # ) + # input_omx = omx.open_file(input_path, "r") + # core_name = mode + "_TRN_" + set + "_" + period.upper() + # output_omx[core_name] = input_omx[core_name][:, :] + # input_omx.close() +# + # output_omx.close() + # transit TAZ + for period in time_period_names: + output_path = ( + self.controller.get_abs_path(self.config.transit_demand_file) + .__str__() + .format(period=period, iter=self.controller.iteration) + ) + output_omx = omx.open_file(output_path, "w") + for mode_agg in self.config.mode_agg: + if mode_agg.name != "transit": + continue + for mode in mode_agg.modes: + input_path = ( + self.controller.get_abs_path( + self.config.transit_taz_ctramp_output_file + ) + .__str__() + .format( + period=period, + mode_agg=mode_agg.name, + mode=mode, + ) + ) + input_omx = omx.open_file(input_path, "r") + core_name = mode + "_TRN_" + period.upper() + output_omx[core_name] = input_omx[core_name][:, :] + input_omx.close() + + output_omx.close() \ No newline at end of file diff --git a/tm2py/components/demand/internal_external.py b/tm2py/components/demand/internal_external.py index e69de29b..febc1e0c 100644 --- a/tm2py/components/demand/internal_external.py +++ b/tm2py/components/demand/internal_external.py @@ -0,0 +1,310 @@ +"""Module containing Internal <-> External trip model.""" + +from __future__ import annotations + +import itertools +import os +from collections import defaultdict +from typing import TYPE_CHECKING, Dict + +import numpy as np +import openmatrix as _omx + +from tm2py.components.component import Component, Subcomponent +from tm2py.components.demand.toll_choice import 
TollChoiceCalculator +from tm2py.components.time_of_day import TimePeriodSplit +from tm2py.emme.matrix import OMXManager +from tm2py.logger import LogStartEnd +from tm2py.matrix import create_matrix_factors +from tm2py.omx import omx_to_dict + +if TYPE_CHECKING: + from tm2py.controller import RunController + +NumpyArray = np.array + + +class InternalExternal(Component): + """Develop Internal <-> External trip tables from land use and impedances. + + 1. Grow demand from base year using static rates ::ExternalDemand + 2. Split by time of day using static factors ::TimePeriodSplit + 3. Apply basic toll binomial choice model: ::ExternalTollChoice + + Governed by InternalExternalConfig: + highway_demand_file: + input_demand_file: + input_demand_matrixname_tmpl: + modes: + reference_year: + annual_growth_rate: List[MatrixFactorConfig] + time_of_day: TimeOfDayConfig + toll_choice: TollChoiceConfig + special_gateway_adjust: Optional[List[MatrixFactorConfig]] + """ + + def __init__(self, controller: "RunController"): + super().__init__(controller) + self.config = self.controller.config.internal_external + + self.sub_components = { + "demand forecast": ExternalDemand(controller, self), + "time of day": TimePeriodSplit( + controller, self, self.config.time_of_day.classes[0].time_period_split + ), + "toll choice": ExternalTollChoice(controller, self), + } + + @property + def classes(self): + return self.config.modes + + def validate_inputs(self): + """Validate inputs to component.""" + ## TODO + pass + + @LogStartEnd() + def run(self): + """Run internal/external travel demand component.""" + + daily_demand = self.sub_components["demand forecast"].run() + period_demand = self.sub_components["time of day"].run(daily_demand) + class_demands = self.sub_components["toll choice"].run(period_demand) + self._export_results(class_demands) + + @LogStartEnd() + def _export_results(self, demand: Dict[str, Dict[str, NumpyArray]]): + """Export assignable class demands to OMX files by time-of-day.""" + outdir = self.get_abs_path(self.config.output_trip_table_directory) + os.makedirs(outdir, exist_ok=True) + for period, matrices in demand.items(): + with OMXManager( + os.path.join( + outdir, self.config.outfile_trip_table_tmp.format(period=period) + ), + "w", + ) as output_file: + for name, data in matrices.items(): + output_file.write_array(data, name) + + +class ExternalDemand(Subcomponent): + """Forecast of daily internal<->external demand based on growth from a base year. + + Create a daily matrix that includes internal/external, external/internal, + and external/external passenger vehicle travel (based on Census 2000 journey-to-work flows). + These trip tables are based on total traffic counts, which include trucks, but trucks are + not explicitly segmented from passenger vehicles. This short-coming is a hold-over from + BAYCAST and will be addressed in the next model update. + + The row and column totals are taken from count station data provided by Caltrans. The + BAYCAST 2006 IX matrix is used as the base matrix and scaled to match forecast year growth + assumptions. The script generates estimates for the model forecast year; the growth rates + were discussed with neighboring MPOs as part of the SB 375 target setting process. + + Input: (1) Station-specific assumed growth rates for each forecast year (the lack of + external/external movements through the region allows simple factoring of + cells without re-balancing); + (2) An input base matrix derived from the Census journey-to-work data. 
+
+    Output: (1) Four-table, forecast-year specific trip tables containing internal/external,
+                external/internal, and external/external vehicle (xxx or person xxx) travel.
+
+
+    Governed by class DemandGrowth Config:
+    ```
+    highway_demand_file:
+    input_demand_file:
+    input_demand_matrixname_tmpl:
+    modes:
+    reference_year:
+    annual_growth_rate:
+    special_gateway_adjust:
+    ```
+    """
+
+    def __init__(self, controller, component):
+
+        super().__init__(controller, component)
+        self.config = self.component.config.demand
+        # Loaded lazily
+        self._base_demand = None
+
+    @property
+    def year(self):
+        return self.controller.config.scenario.year
+
+    @property
+    def modes(self):
+        return self.component.classes
+
+    @property
+    def input_demand_file(self):
+        return self.get_abs_path(self.config.input_demand_file)
+
+    @property
+    def base_demand(self):
+        if self._base_demand is None:
+            self._load_base_demand()
+        return self._base_demand
+
+    def validate_inputs(self):
+        # TODO
+        pass
+
+    def _load_base_demand(self):
+        """Load reference matrices from .omx to self._base_demand.
+
+        input file template: config.internal_external.input_demand_matrixname_tmpl
+        modes: config.internal_external.modes
+        """
+        _mx_name_tmpl = self.config.input_demand_matrixname_tmpl
+        _matrices = {m: _mx_name_tmpl.format(mode=m.upper()) for m in self.modes}
+
+        self._base_demand = omx_to_dict(self.input_demand_file, matrices=_matrices)
+
+    def run(self, base_demand: Dict[str, NumpyArray] = None) -> Dict[str, NumpyArray]:
+        """Calculate adjusted demand based on scenario year and growth rates.
+
+        Steps:
+        - 1.1 apply special factors to certain gateways based on ID
+        - 1.2 apply gateway-specific annual growth rates to the results of step 1.1
+          to generate a year-specific forecast
+
+        Args:
+            base_demand: dictionary of input daily demand matrices (numpy arrays);
+                defaults to self.base_demand
+
+        Returns:
+            Dictionary of Numpy matrices of daily PA by class mode
+        """
+        # Build adjustment matrix to be applied to all input matrices
+        # special gateway adjustments based on zone index
+        if base_demand is None:
+            base_demand = self.base_demand
+        _num_years = self.year - self.config.reference_year
+        _adj_matrix = np.ones(base_demand["da"].shape)
+
+        _adj_matrix = create_matrix_factors(
+            default_matrix=_adj_matrix,
+            matrix_factors=self.config.special_gateway_adjust,
+        )
+
+        _adj_matrix = create_matrix_factors(
+            default_matrix=_adj_matrix,
+            matrix_factors=self.config.annual_growth_rate,
+            periods=_num_years,
+        )
+
+        daily_prod_attract = dict(
+            (_mode, _demand * _adj_matrix) for _mode, _demand in base_demand.items()
+        )
+        return daily_prod_attract
+
+
+class ExternalTollChoice(Subcomponent):
+    """Toll choice
+    -----------
+    Apply a binomial choice model for drive alone, shared ride 2, and shared ride 3
+    internal/external personal vehicle travel.
+
+    Input:  (1) Time-period-specific origin/destination matrices of drive alone, shared ride 2,
+                and shared ride 3+ internal/external trip tables.
+            (2) Skims providing the time and cost for value toll and non-value toll paths
+                for each:
+
+                traffic_skims_{period}.omx, where {period} is the time period ID,
+                {class} is the class name da, sr2, sr3, with the following matrix names
+                    Non-value-toll paying time: {period}_{class}_time,
+                    Non-value-toll distance: {period}_{class}_dist,
+                    Non-value-toll bridge toll is: {period}_{class}_bridgetoll_{class},
+                    Value-toll paying time is: {period}_{class}toll_time,
+                    Value-toll paying distance is: {period}_{class}toll_dist,
+                    Value-toll bridge toll is: {period}_{class}toll_bridgetoll_{class},
+                    Value-toll value toll is: {period}_{class}toll_valuetoll_{class},
+
+    Output: Five six-table trip matrices, one for each time period. Two tables for each vehicle
+            class representing value-toll paying path trips and non-value-toll paying path trips.
+
+    Governed by TollClassConfig:
+
+    ```
+    classes:
+    value_of_time:
+    operating_cost_per_mile:
+    property_to_skim_toll:
+    property_to_skim_notoll:
+    utility:
+    ```
+    """
+
+    def __init__(self, controller, component):
+        super().__init__(controller, component)
+
+        self.config = self.component.config.toll_choice
+
+        self.sub_components = {
+            "toll choice calculator": TollChoiceCalculator(
+                controller, component, self.config
+            ),
+        }
+
+        # shortcut
+        self._toll_choice = self.sub_components["toll choice calculator"]
+        self._toll_choice.toll_skim_suffix = "trk"
+
+    def validate_inputs(self):
+        # TODO
+        pass
+
+    @LogStartEnd()
+    def run(
+        self, period_demand: Dict[str, Dict[str, NumpyArray]]
+    ) -> Dict[str, Dict[str, NumpyArray]]:
+        """Binary toll / non-toll choice model by class.
+
+        input: result of the time-of-day split
+        skims:
+            traffic_skims_{period}.omx, where {period} is the time period ID,
+            {class} is the class name da, sr2, sr3, with the following matrix names
+                Non-value-toll paying time: {period}_{class}_time,
+                Non-value-toll distance: {period}_{class}_dist,
+                Non-value-toll bridge toll is: {period}_{class}_bridgetoll_{class},
+                Value-toll paying time is: {period}_{class}toll_time,
+                Value-toll paying distance is: {period}_{class}toll_dist,
+                Value-toll bridge toll is: {period}_{class}toll_bridgetoll_{class},
+                Value-toll value toll is: {period}_{class}toll_valuetoll_{class},
+
+        STEPS:
+        3.1: For each time of day, for each da, sr2, sr3, calculate
+            - utility of toll and nontoll
+            - probability of toll / nontoll
+            - split demand into toll and nontoll matrices
+
+        """
+
+        _time_class_combos = itertools.product(
+            self.time_period_names, self.component.classes
+        )
+
+        class_demands = defaultdict(dict)
+        for _time_period, _class in _time_class_combos:
+
+            # Accept the period key in whichever case the demand dictionary uses
+            if _time_period in period_demand.keys():
+                pass
+            elif _time_period.lower() in period_demand.keys():
+                _time_period = _time_period.lower()
+            elif _time_period.upper() in period_demand.keys():
+                _time_period = _time_period.upper()
+            else:
+                raise ValueError(
+                    f"Period {_time_period} not an available time period. "
+                    f"Available periods are: {period_demand.keys()}"
+                )
+
+            _split_demand = self._toll_choice.run(
+                period_demand[_time_period][_class], _class, _time_period
+            )
+
+            class_demands[_time_period][_class] = _split_demand["non toll"]
+            class_demands[_time_period][f"{_class}toll"] = _split_demand["toll"]
+        return class_demands
diff --git a/tm2py/components/demand/prepare_demand.py b/tm2py/components/demand/prepare_demand.py
new file mode 100644
index 00000000..2347f48f
--- /dev/null
+++ b/tm2py/components/demand/prepare_demand.py
@@ -0,0 +1,545 @@
+"""Demand loading from OMX to Emme database."""
+
+from __future__ import annotations
+
+import itertools
+from abc import ABC
+from typing import TYPE_CHECKING, Dict, List, Union
+import pathlib
+
+import numpy as np
+import pandas as pd
+
+from tm2py.components.component import Component, Subcomponent
+from tm2py.emme.manager import Emmebank
+from tm2py.emme.matrix import OMXManager
+from tm2py.logger import LogStartEnd
+from tm2py.matrix import redim_matrix
+from collections import defaultdict
+
+
+if TYPE_CHECKING:
+    from tm2py.controller import RunController
+
+NumpyArray = np.array
+
+
+class EmmeDemand:
+    """Abstract base class to import and average demand."""
+
+    def __init__(self, controller: RunController):
+        """Constructor for EmmeDemand class.
+
+        Args:
+            controller (RunController): Run controller for the current run.
+        """
+        self.controller = controller
+        self._emmebank = None
+        self._scenario = None
+        self._source_ref_key = None
+
+    @property
+    def logger(self):
+        """Reference to logger."""
+        return self.controller.logger
+
+    def _read(
+        self, path: str, name: str, num_zones, factor: float = None
+    ) -> NumpyArray:
+        """Read matrix array from OMX file at path with name, and multiply by factor (if specified).
+
+        Args:
+            path: full path to OMX file
+            name: name of the OMX matrix / key
+            num_zones: number of zones to pad the demand matrix out to
+            factor: optional factor to apply to matrix
+        """
+        # the context manager closes the file; no explicit close() needed
+        with OMXManager(path, "r") as omx_file:
+            demand = omx_file.read(name)
+        if factor is not None:
+            demand = factor * demand
+        demand = self._redim_demand(demand, num_zones)
+        # self.logger.log(f"{name} sum: {demand.sum()}", level=3)
+        return demand
+
+    @staticmethod
+    def _redim_demand(demand, num_zones):
+        _shape = demand.shape
+        if _shape < (num_zones, num_zones):
+            demand = np.pad(
+                demand, ((0, num_zones - _shape[0]), (0, num_zones - _shape[1]))
+            )
+        elif _shape > (num_zones, num_zones):
+            # raise (rather than construct and discard) the exception
+            raise ValueError(
+                f"Provided demand matrix is larger ({_shape}) than the "
+                f"specified number of zones: {num_zones}"
+            )
+
+        return demand
+
+    def _save_demand(
+        self,
+        name: str,
+        demand: NumpyArray,
+        description: str = None,
+        apply_msa: bool = False,
+    ):
+        """Save demand array to Emme matrix with name, optional description.
+
+        Matrix will be created if it does not exist and the model is on iteration 0.
+
+        Args:
+            name: name of the matrix in the Emmebank
+            demand: NumpyArray, demand array to save
+            description: str, optional description to use in the Emmebank
+            apply_msa: bool, default False: use MSA on matrix with current array
+                values if model is on iteration >= 1
+        """
+        matrix = self._emmebank.emmebank.matrix(f'mf"{name}"')
+        msa_iteration = self.controller.iteration
+        if not apply_msa or msa_iteration <= 1:
+            if not matrix:
+                ident = self._emmebank.emmebank.available_matrix_identifier("FULL")
+                matrix = self._emmebank.emmebank.create_matrix(ident)
+                matrix.name = name
+            if description is not None:
+                matrix.description = description
+        else:
+            if not matrix:
+                raise Exception(f"error averaging demand: matrix {name} does not exist")
+            prev_demand = matrix.get_numpy_data(self._scenario.id)
+            demand = prev_demand + (1.0 / msa_iteration) * (demand - prev_demand)
+        self.logger.log(f"{name} sum: {demand.sum()}", level="DEBUG")
+        matrix.set_numpy_data(demand, self._scenario.id)
+
+
+def avg_matrix_msa(
+    prev_avg_matrix: NumpyArray, this_iter_matrix: NumpyArray, msa_iteration: int
+) -> NumpyArray:
+    """Average matrices based on Method of Successive Averages (MSA).
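+
+    Example (illustrative):
+        >>> import numpy as np
+        >>> prev = np.array([[10.0, 0.0], [0.0, 10.0]])
+        >>> new = np.array([[20.0, 0.0], [0.0, 20.0]])
+        >>> avg_matrix_msa(prev, new, msa_iteration=2)
+        array([[15.,  0.],
+               [ 0., 15.]])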
+
+    Args:
+        prev_avg_matrix (NumpyArray): Previously averaged matrix
+        this_iter_matrix (NumpyArray): Matrix for this iteration
+        msa_iteration (int): MSA iteration
+
+    Returns:
+        NumpyArray: MSA-averaged matrix for this iteration.
+    """
+    if msa_iteration < 1:
+        return this_iter_matrix
+    result_matrix = prev_avg_matrix + (1.0 / msa_iteration) * (
+        this_iter_matrix - prev_avg_matrix
+    )
+    return result_matrix
+
+
+class PrepareHighwayDemand(EmmeDemand):
+    """Import and average highway demand.
+
+    Demand is imported from OMX files based on reference file paths and OMX
+    matrix names in highway assignment config (highway.classes).
+    The demand is averaged using MSA with the current demand matrices
+    (in the Emmebank) if the controller.iteration > 1.
+
+    Args:
+        controller: parent RunController object
+    """
+
+    def __init__(self, controller: RunController):
+        """Constructor for PrepareHighwayDemand.
+
+        Args:
+            controller (RunController): Reference to run controller object.
+        """
+        super().__init__(controller)
+        self.controller = controller
+        self.config = self.controller.config.highway
+        self._highway_emmebank = None
+
+    def validate_inputs(self):
+        # TODO
+        pass
+
+    @property
+    def highway_emmebank(self):
+        if self._highway_emmebank is None:
+            self._highway_emmebank = self.controller.emme_manager.highway_emmebank
+            self._emmebank = self._highway_emmebank
+        return self._highway_emmebank
+
+    # @LogStartEnd("prepare highway demand")
+    def run(self):
+        """Open combined demand OMX files from demand models and prepare for assignment."""
+
+        self.highway_emmebank.zero_matrix
+        for time in self.controller.time_period_names:
+            for klass in self.config.classes:
+                self._prepare_demand(klass.name, klass.description, klass.demand, time)
+
+    def _prepare_demand(
+        self,
+        name: str,
+        description: str,
+        demand_config: List[Dict[str, Union[str, float]]],
+        time_period: str,
+    ):
+        """Load demand from OMX files and save to Emme matrix for highway assignment.
+
+        Average with previous demand (MSA) if the current iteration > 1
+
+        Args:
+            name (str): the name of the highway assignment class
+            description (str): the description for the highway assignment class
+            demand_config (dict): the list of file cross-reference(s) for the demand to be loaded
+                {"source": , "name": , "factor": }
+            time_period (str): the time period ID (name)
+        """
+        self._scenario = self.highway_emmebank.scenario(time_period)
+        num_zones = len(self._scenario.zone_numbers)
+        demand = self._read_demand(demand_config[0], time_period, num_zones)
+        for file_config in demand_config[1:]:
+            demand = demand + self._read_demand(file_config, time_period, num_zones)
+        demand_name = f"{time_period}_{name}"
+        description = f"{time_period} {description} demand"
+        self._save_demand(demand_name, demand, description, apply_msa=True)
+
+    def _read_demand(self, file_config, time_period, num_zones):
+        # Load demand from cross-referenced source file,
+        # the named demand model component under the key highway_demand_file
+        source = file_config["source"]
+        name = file_config["name"].format(period=time_period.upper())
+        path = self.controller.get_abs_path(
+            self.controller.config[source].highway_demand_file
+        ).__str__()
+        return self._read(
+            path.format(period=time_period, iter=self.controller.iteration),
+            name,
+            num_zones,
+        )
+
+    @LogStartEnd("Prepare household demand matrices.")
+    def prepare_household_demand(self):
+        """Prepares highway and transit household demand matrices from trip lists produced by CT-RAMP.
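+
+        Example of the stop-period binning used below (illustrative period starts):
+            >>> import pandas as pd
+            >>> starts = {"EA": 1, "AM": 7, "MD": 13, "PM": 21, "EV": 27}
+            >>> tps = sorted(starts, key=lambda x: starts[x])
+            >>> pd.cut(
+            ...     pd.Series([3, 8, 30]), [starts[tp] for tp in tps],
+            ...     right=False, labels=tps[:-1],
+            ... ).cat.add_categories(tps[-1]).fillna(tps[-1]).astype(str).tolist()
+            ['EA', 'AM', 'EV']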
+ """ + iteration = self.controller.iteration + + # Create folders if they don't exist + pathlib.Path(self.controller.get_abs_path(self.controller.config.household.highway_demand_file)).parents[0].mkdir(parents=True, exist_ok=True) + pathlib.Path(self.controller.get_abs_path(self.controller.config.household.transit_demand_file)).parents[0].mkdir(parents=True, exist_ok=True) + # pathlib.Path(self.controller.get_abs_path(self.controller.config.household.active_demand_file)).parents[0].mkdir(parents=True, exist_ok=True) + + + indiv_trip_file = self.controller.config.household.ctramp_indiv_trip_file.format(iteration = iteration) + joint_trip_file = self.controller.config.household.ctramp_joint_trip_file.format(iteration = iteration) + it_full, jt_full = pd.read_csv(indiv_trip_file), pd.read_csv(joint_trip_file) + + # Add time period, expanded count + time_period_start = dict(zip( + [c.name.upper() for c in self.controller.config.time_periods], + [c.start_period for c in self.controller.config.time_periods])) + # the last time period needs to be filled in because the first period may or may not start at midnight + time_periods_sorted = sorted(time_period_start, key = lambda x:time_period_start[x]) # in upper case + first_period = time_periods_sorted[0] + periods_except_last = time_periods_sorted[:-1] + breakpoints = [time_period_start[tp] for tp in time_periods_sorted] + it_full['time_period'] = pd.cut(it_full.stop_period, breakpoints, right = False, labels = periods_except_last).cat.add_categories(time_periods_sorted[-1]).fillna(time_periods_sorted[-1]).astype(str) + jt_full['time_period'] = pd.cut(jt_full.stop_period, breakpoints, right = False, labels = periods_except_last).cat.add_categories(time_periods_sorted[-1]).fillna(time_periods_sorted[-1]).astype(str) + it_full['eq_cnt'] = 1/it_full.sampleRate + it_full['eq_cnt'] = np.where(it_full['trip_mode'].isin([3,4,5]), 0.5 * it_full['eq_cnt'], np.where(it_full['trip_mode'].isin([6,7,8]), 0.35 * it_full['eq_cnt'], it_full['eq_cnt'])) + jt_full['eq_cnt'] = jt_full.num_participants/jt_full.sampleRate + zp_cav = self.controller.config.household.OwnedAV_ZPV_factor + zp_tnc = self.controller.config.household.TNC_ZPV_factor + + maz_taz_df = pd.read_csv(self.controller.get_abs_path(self.controller.config.scenario.landuse_file), usecols = ["MAZ", "TAZ"]) + it_full = it_full.merge(maz_taz_df, left_on = 'orig_mgra', right_on= 'MAZ', how = 'left').rename(columns={'TAZ': 'orig_taz'}) + it_full = it_full.merge(maz_taz_df, left_on = 'dest_mgra', right_on= 'MAZ', how = 'left').rename(columns={'TAZ': 'dest_taz'}) + jt_full = jt_full.merge(maz_taz_df, left_on = 'orig_mgra', right_on= 'MAZ', how = 'left').rename(columns={'TAZ': 'orig_taz'}) + jt_full = jt_full.merge(maz_taz_df, left_on = 'dest_mgra', right_on= 'MAZ', how = 'left').rename(columns={'TAZ': 'dest_taz'}) + it_full['trip_mode'] = np.where(it_full['trip_mode'] == 14, 13, it_full['trip_mode']) + jt_full['trip_mode'] = np.where(jt_full['trip_mode'] == 14, 13, jt_full['trip_mode']) + + num_zones = self.num_internal_zones + OD_full_index = pd.MultiIndex.from_product([range(1,num_zones + 1), range(1,num_zones + 1)]) + + def combine_trip_lists(it, jt, trip_mode): + # combines individual trip list and joint trip list + combined_trips = pd.concat([it[(it['trip_mode'] == trip_mode)], jt[(jt['trip_mode'] == trip_mode)]]) + combined_sum = combined_trips.groupby(['orig_taz','dest_taz'])['eq_cnt'].sum() + return combined_sum.reindex(OD_full_index, fill_value=0).unstack().values + + def 
create_zero_passenger_trips(trips, deadheading_factor, trip_modes=[1, 2, 3]):
+            # copy so the deadhead (zero-passenger) trips don't mutate the original trip list
+            zpv_trips = trips.loc[
+                (trips['avAvailable'] == 1) & (trips['trip_mode'].isin(trip_modes))
+            ].copy()
+            zpv_trips['eq_cnt'] = zpv_trips['eq_cnt'] * deadheading_factor
+            # reverse the OD pair: the vehicle returns empty
+            zpv_trips = zpv_trips.rename(columns={'dest_taz': 'orig_taz',
+                                                  'orig_taz': 'dest_taz'})
+            return zpv_trips
+
+        # create zero passenger trips for auto modes
+        if it_full['avAvailable'].sum() > 0:
+            it_zpav_trp = create_zero_passenger_trips(it_full, zp_cav, trip_modes=[1, 2, 3])
+            it_zptnc_trp = create_zero_passenger_trips(it_full, zp_tnc, trip_modes=[9])
+            # Combining zero passenger trips to trip files
+            it_full = pd.concat([it_full, it_zpav_trp, it_zptnc_trp], ignore_index=True).reset_index(drop=True)
+
+        if jt_full['avAvailable'].sum() > 0:
+            jt_zpav_trp = create_zero_passenger_trips(jt_full, zp_cav, trip_modes=[1, 2, 3])
+            jt_zptnc_trp = create_zero_passenger_trips(jt_full, zp_tnc, trip_modes=[9])
+            # Combining zero passenger trips to trip files
+            jt_full = pd.concat([jt_full, jt_zpav_trp, jt_zptnc_trp], ignore_index=True).reset_index(drop=True)
+
+        # read properties from config
+
+        mode_name_dict = self.controller.config.household.ctramp_mode_names
+        income_segment_config = self.controller.config.household.income_segment
+
+        if income_segment_config['enabled']:
+            # This only affects highway trip tables.
+
+            hh_file = self.controller.config.household.ctramp_hh_file.format(iteration=iteration)
+            hh = pd.read_csv(hh_file, usecols=['hh_id', 'income'])
+            it_full = it_full.merge(hh, on='hh_id', how='left')
+            jt_full = jt_full.merge(hh, on='hh_id', how='left')
+
+            suffixes = income_segment_config['segment_suffixes']
+
+            it_full['income_seg'] = pd.cut(it_full['income'], right=False,
+                                           bins=income_segment_config['cutoffs'] + [float('inf')],
+                                           labels=suffixes).astype(str)
+
+            jt_full['income_seg'] = pd.cut(jt_full['income'], right=False,
+                                           bins=income_segment_config['cutoffs'] + [float('inf')],
+                                           labels=suffixes).astype(str)
+        else:
+            it_full['income_seg'] = ''
+            jt_full['income_seg'] = ''
+            suffixes = ['']
+
+        # groupby objects for combinations of time period - income segmentation, used for highway modes only
+        it_grp = it_full.groupby(['time_period', 'income_seg'])
+        jt_grp = jt_full.groupby(['time_period', 'income_seg'])
+
+        for time_period in time_periods_sorted:
+            self.logger.debug(f"Producing household demand matrices for period {time_period}")
+
+            highway_out_file = OMXManager(
+                self.controller.get_abs_path(self.controller.config.household.highway_demand_file).__str__().format(period=time_period, iter=self.controller.iteration), 'w')
+            transit_out_file = OMXManager(
+                self.controller.get_abs_path(self.controller.config.household.transit_demand_file).__str__().format(period=time_period), 'w')
+            #active_out_file = OMXManager(
+            #    self.controller.get_abs_path(self.controller.config.household.active_demand_file).__str__().format(period=time_period), 'w')
+
+            #hsr_trips_file = _omx.open_file(
+            #    self.controller.get_abs_path(self.controller.config.household.hsr_demand_file).format(year=self.controller.config.scenario.year, period=time_period))
+
+            #interregional_trips_file = _omx.open_file(
+            #    self.controller.get_abs_path(self.controller.config.household.interregional_demand_file).format(year=self.controller.config.scenario.year, period=time_period))
+
+            highway_out_file.open()
+            transit_out_file.open()
+            #active_out_file.open()
+
+            # Transit and active modes: one matrix per time period per mode
+            it = it_full[it_full.time_period == time_period]
+ jt = jt_full[jt_full.time_period == time_period] + + for trip_mode in mode_name_dict: +# if trip_mode in [9,10]: +# matrix_name = mode_name_dict[trip_mode] +# self.logger.debug(f"Writing out mode {mode_name_dict[trip_mode]}") +# active_out_file.write_array(numpy_array=combine_trip_lists(it,jt, trip_mode), name = matrix_name) + + if trip_mode == 11: + matrix_name = "WLK_TRN_WLK" + self.logger.debug(f"Writing out mode WLK_TRN_WLK") + #other_trn_trips = np.array(hsr_trips_file[matrix_name])+np.array(interregional_trips_file[matrix_name]) + transit_out_file.write_array(numpy_array=(combine_trip_lists(it,jt, trip_mode)), name = matrix_name) + + elif trip_mode in [12,13]: + it_outbound, it_inbound = it[it.inbound == 0], it[it.inbound == 1] + jt_outbound, jt_inbound = jt[jt.inbound == 0], jt[jt.inbound == 1] + + matrix_name = f'{mode_name_dict[trip_mode].upper()}_TRN_WLK' + #other_trn_trips = np.array(hsr_trips_file[matrix_name])+np.array(interregional_trips_file[matrix_name]) + self.logger.debug(f"Writing out mode {mode_name_dict[trip_mode].upper() + '_TRN_WLK'}") + transit_out_file.write_array( + numpy_array=(combine_trip_lists(it_outbound,jt_outbound, trip_mode)), + name = matrix_name) + + matrix_name = f'WLK_TRN_{mode_name_dict[trip_mode].upper()}' + #other_trn_trips = np.array(hsr_trips_file[matrix_name])+np.array(interregional_trips_file[matrix_name]) + self.logger.debug(f"Writing out mode {'WLK_TRN_' + mode_name_dict[trip_mode].upper()}") + transit_out_file.write_array( + numpy_array=(combine_trip_lists(it_inbound,jt_inbound, trip_mode)), + name = matrix_name) + + + # Highway modes: one matrix per suffix (income class) per time period per mode + for suffix in suffixes: + + highway_cache = {} + + if (time_period, suffix) in it_grp.groups.keys(): + it = it_grp.get_group((time_period, suffix)) + else: + it = pd.DataFrame(None, columns = it_full.columns) + + if (time_period, suffix) in jt_grp.groups.keys(): + jt = jt_grp.get_group((time_period, suffix)) + else: + jt = pd.DataFrame(None, columns = jt_full.columns) + + + for trip_mode in sorted(mode_name_dict): + # Python preserves keys in the order they are inserted but + # mode_name_dict originates from TOML, which does not guarantee + # that the ordering of keys is preserved. 
See + # https://github.com/toml-lang/toml/issues/162 + + if trip_mode in [1,2,3,4,5,6,7,8,9,10,15,16,17]: # currently hard-coded based on Travel Mode trip mode codes + highway_cache[mode_name_dict[trip_mode]] = combine_trip_lists(it,jt, trip_mode) + out_mode = f'{mode_name_dict[trip_mode].upper()}' + matrix_name =f'{out_mode}_{suffix}_{time_period.upper()}' if suffix else f'{out_mode}_{time_period.upper()}' + highway_out_file.write_array(numpy_array = highway_cache[mode_name_dict[trip_mode]], name = matrix_name) + + elif trip_mode in [15, 16]: + # identify the correct mode split factors for da, sr2, sr3 + self.logger.debug(f"Splitting ridehail trips into shared ride trips") + ridehail_split_factors = defaultdict(float) + splits = self.controller.config.household.rideshare_mode_split + for key in splits: + out_mode_split = self.controller.config.household.__dict__[f'{key}_split'] + for out_mode in out_mode_split: + ridehail_split_factors[out_mode] += out_mode_split[out_mode] * splits[key] + + ridehail_trips = combine_trip_lists(it,jt, trip_mode) + for out_mode in ridehail_split_factors: + matrix_name =f'{out_mode}_{suffix}' if suffix else out_mode + self.logger.debug(f"Writing out mode {out_mode}") + highway_cache[out_mode] += (ridehail_trips * ridehail_split_factors[out_mode]).astype(float).round(2) + highway_out_file.write_array(numpy_array = highway_cache[out_mode], name = matrix_name) + + highway_out_file.close() + transit_out_file.close() + #active_out_file.close() + + @property + def num_internal_zones(self): + df = pd.read_csv( + self.controller.get_abs_path(self.controller.config.scenario.landuse_file), usecols = [self.controller.config.scenario.landuse_index_column]) + return len(df['TAZ'].unique()) + + @property + def num_total_zones(self): + self._emmebank_path = self.controller.get_abs_path(self.controller.config.emme.highway_database_path) + self._emmebank = self.controller.emme_manager.emmebank(self._emmebank_path) + time_period = self.controller.config.time_periods[0].name + scenario = self.get_emme_scenario(self._emmebank.path, time_period) # any scenario id works + return len(scenario.zone_numbers) + +class PrepareTransitDemand(EmmeDemand): + """Import transit demand. + + Demand is imported from OMX files based on reference file paths and OMX + matrix names in transit assignment config (transit.classes). + The demand is average using MSA with the current demand matrices (in the + Emmebank) if transit.apply_msa_demand is true if the + controller.iteration > 1. + + """ + + def __init__(self, controller: "RunController"): + """Constructor for PrepareTransitDemand. + + Args: + controller: RunController object. 
+        """
+        super().__init__(controller)
+        self.controller = controller
+        self.config = self.controller.config.transit
+        self._transit_emmebank = None
+
+    def validate_inputs(self):
+        """Validate the inputs."""
+        # TODO
+
+    @property
+    def transit_emmebank(self):
+        if not self._transit_emmebank:
+            self._transit_emmebank = self.controller.emme_manager.transit_emmebank
+            self._emmebank = self._transit_emmebank
+        return self._transit_emmebank
+
+    @LogStartEnd("Prepare transit demand")
+    def run(self):
+        """Open combined demand OMX files from demand models and prepare for assignment."""
+        self._source_ref_key = "transit_demand_file"
+        self.transit_emmebank.zero_matrix
+        _time_period_tclass = itertools.product(
+            self.controller.time_period_names, self.config.classes
+        )
+        for _time_period, _tclass in _time_period_tclass:
+            self._prepare_demand(
+                _tclass.skim_set_id, _tclass.description, _tclass.demand, _time_period
+            )
+
+    def _prepare_demand(
+        self,
+        name: str,
+        description: str,
+        demand_config: List[Dict[str, Union[str, float]]],
+        time_period: str,
+    ):
+        """Load demand from OMX files and save to Emme matrix for transit assignment.
+
+        Average with previous demand (MSA) if the current iteration > 1 and
+        config.transit.apply_msa_demand is True
+
+        Args:
+            name (str): the name of the transit assignment class in the OMX files, usually a number
+            description (str): the description for the transit assignment class
+            demand_config (dict): the list of file cross-reference(s) for the demand to be loaded
+                {"source": , "name": , "factor": }
+            time_period (str): the time period ID (name)
+        """
+        self._scenario = self.transit_emmebank.scenario(time_period)
+        num_zones = len(self._scenario.zone_numbers)
+        demand = self._read_demand(demand_config[0], time_period, name, num_zones)
+        for file_config in demand_config[1:]:
+            demand = demand + self._read_demand(
+                file_config, time_period, name, num_zones
+            )
+        demand_name = f"TRN_{name}_{time_period}"
+        description = f"{time_period} {description} demand"
+        apply_msa = self.config.apply_msa_demand
+        self._save_demand(demand_name, demand, description, apply_msa=apply_msa)
+
+    def _read_demand(self, file_config, time_period, skim_set, num_zones):
+        # Load demand from the cross-referenced source file:
+        # the named demand model component under the key transit_demand_file
+        if (self.controller.config.run.warmstart.warmstart and
+            self.controller.iteration == 0
+        ):
+            source = self.controller.config.run.warmstart
+            path = self.controller.get_abs_path(
+                source.household_transit_demand_file
+            ).__str__()
+        else:
+            source = file_config["source"]
+            path = self.controller.get_abs_path(
+                self.controller.config[source].transit_demand_file
+            ).__str__()
+        name = file_config["name"]
+        return self._read(
+            path.format(
+                period=time_period,
+                # set=skim_set,
+                # iter=self.controller.iteration
+            ),
+            name,
+            num_zones,
+        )
diff --git a/tm2py/components/demand/temp.ipynb b/tm2py/components/demand/temp.ipynb
new file mode 100644
index 00000000..e69de29b
diff --git a/tm2py/components/demand/toll_choice.py b/tm2py/components/demand/toll_choice.py
new file mode 100644
index 00000000..d256d958
--- /dev/null
+++ b/tm2py/components/demand/toll_choice.py
@@ -0,0 +1,339 @@
+"""Toll Choice Model."""
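+
+# The toll/non-toll split implemented below is a binary logit model:
+#     P(non-toll) = exp(U_nontoll) / (exp(U_toll) + exp(U_nontoll))
+# where each utility U is linear in the time, distance, and cost skims
+# (see TollChoiceCalculator.calc_nontoll_prob and calc_exp_util).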
+
+import itertools
+import os
+from typing import TYPE_CHECKING, Collection, Dict, List, Mapping, Optional
+
+import numpy as np
+import openmatrix as _omx
+import pandas as pd
+
+from tm2py.components.component import Component, Subcomponent
+from tm2py.components.network.skims import get_omx_skim_as_numpy, get_summed_skims
+from tm2py.config import ChoiceClassConfig, TollChoiceConfig
+from tm2py.emme.matrix import OMXManager
+from tm2py.logger import LogStartEnd
+from tm2py.omx import df_to_omx
+from tm2py.tools import interpolate_dfs
+
+NumpyArray = np.array
+
+if TYPE_CHECKING:
+    from tm2py.controller import RunController
+
+DEFAULT_PROPERTY_SKIM_TOLL = {
+    "time": ["time"],
+    "distance": ["dist"],
+    "cost": ["bridgetoll", "valuetoll"],
+}
+
+DEFAULT_PROPERTY_SKIM_NOTOLL = {
+    "time": ["time"],
+    "distance": ["dist"],
+    "cost": ["bridgetoll"],
+}
+
+
+class TollChoiceCalculator(Subcomponent):
+    """Implements toll choice calculations.
+
+    Centralized implementation of Toll Choice calculations common to
+    Commercial and Internal-external sub models. Loads input skims
+    from OMXManager.
+
+    This subcomponent should be able to be configured solely within:
+
+    TollChoiceConfig:
+        classes: List[ChoiceClassConfig]
+        value_of_time: float
+        operating_cost_per_mile: float
+        utility: Optional[List[CoefficientConfig]] = Field(default=None)
+
+    ChoiceClassConfig:
+        name: str
+        skim_mode_notoll: Optional[str] = Field(default="da")
+        skim_mode_toll: Optional[str] = Field(default="datoll")
+        property_factors: Optional[List[CoefficientConfig]] = Field(default=None)
+
+    CoefficientConfig:
+        name: str
+        coeff: Optional[float] = Field(default=None)
+
+    Properties:
+        classes (Dict[str,TollClassConfig]): convenience access to TollChoiceConfig
+        utility (Dict[str,float]): access to all utility factors by property
+
+    """
+
+    def __init__(
+        self,
+        controller: "RunController",
+        component: Component,
+        config: TollChoiceConfig,
+    ):
+        """Constructor for TollChoiceCalculator.
+
+        Args:
+            controller: RunController object
+            component: Component which contains this subcomponent
+            config: TollChoiceConfig Instance
+        """
+        super().__init__(controller, component)
+
+        self.config = config
+        self._class_configs = None
+
+        # Fill in any unconfigured properties with the defaults; build new
+        # dicts rather than calling .update() on the module-level constants,
+        # which would mutate the defaults for every other instance
+        self.property_to_skim_toll = {
+            **DEFAULT_PROPERTY_SKIM_TOLL,
+            **self.config.property_to_skim_toll,
+        }
+        self.property_to_skim_notoll = {
+            **DEFAULT_PROPERTY_SKIM_NOTOLL,
+            **self.config.property_to_skim_notoll,
+        }
+
+        self.utility = {x.property: x.coeff for x in config.utility}
+        # set utility for cost using value of time and distance using operating cost per mile
+        self.utility["cost"] = TollChoiceCalculator.calc_cost_coeff(
+            self.utility["time"], config.value_of_time
+        )
+
+        self.utility["distance"] = TollChoiceCalculator.calc_dist_coeff(
+            self.utility["cost"],
+            config.operating_cost_per_mile,
+        )
+
+        self.toll_skim_suffix = ""
+
+        self._omx_manager = None
+        self._skim_dir = None
+
+        self.skim_dir = self.get_abs_path(
+            self.controller.config.highway.output_skim_path
+        )
+
+    @property
+    def classes(self):
+        # return (rather than recursively reassign) the class lookup
+        return {c.name: c for c in self.config.classes}
+
+    @property
+    def class_config(self):
+        if not self._class_configs:
+            self._class_configs = {c.name: c for c in self.config.classes}
+        return self._class_configs
+
+    @staticmethod
+    def calc_cost_coeff(time_coeff: float, value_of_time: float) -> float:
+        """Calculate cost coefficient from time coefficient and value of time."""
+        # FIXME why is 0.6 here?
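+        # Plausibly a units conversion: with time coefficients per minute,
+        # value_of_time in dollars per hour, and costs in cents,
+        # (60 min/hour) / (100 cents/dollar) = 0.6 converts value_of_time
+        # to cents per minute. Unverified, hence the FIXME above.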
+        return (time_coeff / value_of_time) * 0.6
+
+    @staticmethod
+    def calc_dist_coeff(cost_coeff: float, operating_cost_per_mile: float) -> float:
+        """Calculate coefficient on distance skim from cost coefficient and operating cost."""
+        return cost_coeff * operating_cost_per_mile
+
+    @property
+    def skim_dir(self):
+        """Return the directory where the skim matrices are located."""
+        return self._skim_dir
+
+    @skim_dir.setter
+    def skim_dir(self, value):
+        """Set the directory where the skim matrices are located.
+
+        If the directory is different from the previous directory, initialize an OMX manager
+        to manage skims.
+        """
+        if not os.path.isdir(value):
+            raise ValueError(f"{value} is not a valid skim directory")
+        if value != self._skim_dir:
+            self._omx_manager = OMXManager(value)
+            self._skim_dir = value
+
+    @property
+    def omx_manager(self):
+        """Access to self._omx_manager."""
+        return self._omx_manager
+
+    def validate_inputs(self):
+        """Validate inputs."""
+        # Check the user-provided utility config rather than self.utility,
+        # since __init__ always derives the cost and distance coefficients
+        _config_utility_props = {x.property for x in self.config.utility}
+        if "cost" in _config_utility_props:
+            raise ValueError(
+                "Cost utility for toll choice should be set using value "
+                "of time config."
+            )
+
+        if "distance" in _config_utility_props:
+            raise ValueError(
+                "Distance utility for toll choice should be set using "
+                "operating cost config."
+            )
+
+    def run(
+        self, demand: NumpyArray, class_name: str, time_period: str
+    ) -> Dict[str, NumpyArray]:
+        """Split demand into toll / non toll based on time period and class name.
+
+        Args:
+            demand (NumpyArray): Zone-by-zone demand to split into toll/non-toll
+            class_name (str): class name to find classConfig
+            time_period (str): Time period to use for calculating impedances
+
+        Returns:
+            Dict[str, NumpyArray]: Dictionary mapping "toll" and "non toll" to NumpyArrays with
+                demand assigned to each.
+        """
+
+        prob_nontoll = self.calc_nontoll_prob(time_period, class_name)
+
+        split_demand = {
+            "non toll": prob_nontoll * demand,
+            "toll": (1 - prob_nontoll) * demand,
+        }
+
+        return split_demand
+
+    def calc_nontoll_prob(
+        self,
+        time_period: str,
+        class_name: str,
+    ) -> NumpyArray:
+        """Calculates the non-toll probability using binary logit model, masking non-available options.
+
+        Args:
+            time_period (str): time period abbreviation
+            class_name (str): toll choice class name (a key of self.class_config)
+
+        Returns:
+            NumpyArray: Probability of choosing non-toll option for a given class and time period.
+        """
+
+        e_util_nontoll = self.calc_exp_util(
+            self.property_to_skim_notoll,
+            self.class_config[class_name],
+            time_period,
+        )
+
+        e_util_toll = self.calc_exp_util(
+            self.property_to_skim_toll,
+            self.class_config[class_name],
+            time_period,
+            toll=True,
+        )
+
+        prob_nontoll = e_util_nontoll / (e_util_toll + e_util_nontoll)
+
+        prob_nontoll = self.mask_non_available(
+            prob_nontoll,
+            time_period,
+            self.class_config[class_name].skim_mode,
+            self.class_config[class_name].veh_group_name,
+        )
+
+        return prob_nontoll
+
+    def calc_exp_util(
+        self,
+        prop_to_skim: Mapping[str, Collection[str]],
+        choice_class_config: ChoiceClassConfig,
+        time_period: str,
+        toll: Optional[bool] = False,
+    ) -> NumpyArray:
+        """Calculate the exp(utils) for the time, distance and cost skims.
+
+        Loads the referenced skim matrices and calculates the result as:
+            exp(coeff_time * time + coeff_cost * (op_cost * dist + cost))
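+
+        Example (illustrative, with toy coefficients and a single OD pair):
+            >>> from math import exp
+            >>> coeff_time, coeff_cost, op_cost = -0.02, -0.002, 20.0
+            >>> time, dist, cost = 20.0, 10.0, 100.0  # minutes, miles, cents
+            >>> round(exp(coeff_time * time + coeff_cost * (op_cost * dist + cost)), 4)
+            0.3679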
+
+        Loads the referenced skim matrices and calculates the result as:
+            exp(coeff_time * time + coeff_cost * (op_cost * dist + cost))
+
+        Args:
+            prop_to_skim: mapping of a property (used in coeffs) to a set of skim properties to sum
+            choice_class_config: A ChoiceClassConfig instance
+            time_period: time period abbreviation
+            toll: if True, use the tolled skim mode (class skim mode + "toll")
+
+        Returns:
+            A numpy array with the calculated exp(util) result.
+        """
+        _util_sum = []
+        property_factors = {}
+        if choice_class_config.property_factors is not None:
+            property_factors = {
+                x.property: x.coeff for x in choice_class_config.property_factors
+            }
+        for prop, skim_prop_list in prop_to_skim.items():
+            if not toll:
+                _skim_values = get_summed_skims(
+                    self.controller,
+                    property=skim_prop_list,
+                    mode=choice_class_config.skim_mode,
+                    veh_group_name=choice_class_config.veh_group_name,
+                    time_period=time_period,
+                    omx_manager=self._omx_manager,
+                )
+            else:
+                _skim_values = get_summed_skims(
+                    self.controller,
+                    property=skim_prop_list,
+                    mode=choice_class_config.skim_mode + "toll",
+                    veh_group_name=choice_class_config.veh_group_name,
+                    time_period=time_period,
+                    omx_manager=self._omx_manager,
+                )
+            _util = self.utility[prop] * _skim_values * property_factors.get(prop, 1)
+            _util_sum.append(_util)
+
+        self._omx_manager.close()  # close any open skim files
+
+        # np.add(*arrays) is only correct for exactly two arrays (a third
+        # positional argument is treated as the `out` buffer), so sum the
+        # per-property utilities explicitly before exponentiating
+        return np.exp(sum(_util_sum))
+
+    def mask_non_available(
+        self,
+        prob_nontoll,
+        time_period,
+        skim_mode,
+        veh_group_name,
+        prop_toll_cost="valuetoll",
+        prop_nontoll_time="time",
+    ) -> NumpyArray:
+        """Mask the nontoll probability matrix.
+
+        Set to 1.0 where the toll path has no toll cost (no distinct toll
+        option), and to 0.0 where there is no non-toll time (non-toll option
+        unavailable).
+
+        Args:
+            prob_nontoll: numpy array of calculated probability for non-toll
+            time_period: time period abbreviation
+            skim_mode: skim mode for getting skims
+            veh_group_name: vehicle group name used in the skim matrix names
+            prop_toll_cost: the property to use to see if toll option is available
+            prop_nontoll_time: the property to use to see if a non-toll option is available
+        """
+
+        nontoll_time = get_omx_skim_as_numpy(
+            self.controller,
+            skim_mode,
+            veh_group_name,
+            time_period,
+            prop_nontoll_time,
+            omx_manager=self._omx_manager,
+        )
+
+        toll_tollcost = get_omx_skim_as_numpy(
+            self.controller,
+            skim_mode + "toll",
+            veh_group_name,
+            time_period,
+            prop_toll_cost,
+            omx_manager=self._omx_manager,
+        )
+
+        prob_nontoll[(toll_tollcost == 0) | (toll_tollcost > 999999)] = 1.0
+        prob_nontoll[(nontoll_time == 0) | (nontoll_time > 999999)] = 0.0
+
+        self._omx_manager.close()
+
+        return prob_nontoll
diff --git a/tm2py/components/demand/visitor.py b/tm2py/components/demand/visitor.py
index e69de29b..c26098a3 100644
--- a/tm2py/components/demand/visitor.py
+++ b/tm2py/components/demand/visitor.py
@@ -0,0 +1 @@
+"""Visitor module."""
diff --git a/tm2py/components/network/__init__.py b/tm2py/components/network/__init__.py
index e69de29b..2144a36a 100644
--- a/tm2py/components/network/__init__.py
+++ b/tm2py/components/network/__init__.py
@@ -0,0 +1 @@
+"""Network-related components module."""
diff --git a/tm2py/components/network/active/active_modes.py b/tm2py/components/network/active/active_modes.py
new file mode 100644
index 00000000..8cf917dc
--- /dev/null
+++ b/tm2py/components/network/active/active_modes.py
@@ -0,0 +1,404 @@
+"""Generates shortest path skims for walk and bike at MAZ, TAP or TAZ.
+
+Compute zone-to-zone (root-to-leaf) walk distance and bicycle distances.
+
+Note: additional details in class docstring
+"""
+
+from __future__ import annotations
+
+import os
+from contextlib import contextmanager as _context
+from typing import TYPE_CHECKING, List, Tuple
+
+import pandas as pd
+from numpy import array as NumpyArray
+from numpy import repeat
+
+from tm2py.components.component import Component
+from tm2py.logger import LogStartEnd
+from tm2py.tools import parse_num_processors
+
+if TYPE_CHECKING:
+    from tm2py.controller import RunController
+
+
+ROOT_LEAF_ID_MAP = {"TAZ": "@taz_id", "TAP": "@tap_id", "MAZ": "@maz_id"}
+SUBNETWORK_ID_MAP = {"walk": "@walk_link", "bike": "@bike_link"}
+COUNTIES = [
+    "San Francisco",
+    "San Mateo",
+    "Santa Clara",
+    "Alameda",
+    "Contra Costa",
+    "Solano",
+    "Napa",
+    "Sonoma",
+    "Marin",
+]
+
+
+class ActiveModesSkim(Component):
+    """Generate shortest distance skims between network nodes (TAZs, TAPs, or MAZs).
+
+    Details controlled in config.active_modes, e.g.:
+    [active_modes]
+    emme_scenario_id = 1
+    [[active_modes.shortest_path_skims]]
+    mode = "walk"
+    roots = "MAZ"
+    leaves = "MAZ"
+    max_dist_miles = 3
+    output = "skims\\ped_distance_maz_maz.txt"
+
+    Input: A scenario network containing the attributes used by this
+        component (see used_attributes in _setup)
+
+    Output: (1) Shortest path skims in csv format: from_zone,to_zone,dist
+        skims\\ped_distance_maz_maz.txt
+        skims\\ped_distance_maz_tap.txt
+        skims\\bike_distance_maz_maz.txt
+        skims\\bike_distance_maz_tap.txt
+        skims\\bike_distance_taz_taz.txt
+        skims\\ped_distance_tap_tap.txt
+
+    Internal properties:
+        _temp_scenario: temporary Emme scenario, deleted when component completes
+        _network: in-memory network object
+    """
+
+    def __init__(self, controller: RunController):
+        """Initialize active mode skim component.
+
+        Args:
+            controller: parent Controller object
+        """
+        super().__init__(controller)
+        self.config = self.controller.config.active_modes
+        self._temp_scenario = None
+        self._network = None
+
+    def validate_inputs(self):
+        """Validate inputs files are correct, raise if an error is found."""
+        # TODO
+        pass
+
+    @LogStartEnd("active mode skim")
+    def run(self):
+        """Run shortest path skim calculation for active modes."""
+        skim_list = self.config.shortest_path_skims
+        self._prepare_files()
+        for emmebank_path in [
+            self.controller.config.emme.active_south_database_path,
+            self.controller.config.emme.active_north_database_path,
+        ]:
+            with self._setup(emmebank_path):
+                mode_codes = self._prepare_network()
+                for mode_id, spec in zip(mode_codes, skim_list):
+                    for county in COUNTIES:
+                        log_msg = (
+                            f"skim for mode={spec['mode']}, roots={spec['roots']}, "
+                            f"leaves={spec['leaves']} county={county}"
+                        )
+                        with self.logger.log_start_end(log_msg, level="DETAIL"):
+                            roots, leaves = self._prepare_roots_leaves(
+                                spec["roots"], spec["leaves"], county
+                            )
+                            if not roots or not leaves:
+                                continue
+                            distance_skim = self._run_shortest_path(
+                                mode_id, spec.get("max_dist_miles")
+                            )
+                            self._export_results(
+                                distance_skim, spec["output"], roots, leaves
+                            )
+
+    @_context
+    def _setup(self, emmebank_path: str):
+        """Create temp scenario for setting modes on links and roots and leaves.
+
+        Temp scenario is deleted on exit.
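+
+        Using a temporary scenario lets the component set modes and
+        root/leaf attributes freely without altering the source scenario.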
+ """ + log_msg = f"Active modes shortest path skims {emmebank_path}" + manager = self.controller.emme_manager + with self.logger.log_start_end(log_msg, level="INFO"): + if emmebank_path == self.controller.config.emme.active_north_database_path: + emmebank = manager.active_north_emmebank + else: + emmebank = manager.active_south_emmebank + min_dims = emmebank.emmebank.min_dimensions + required_dims = { + "scenarios": 2, + "links": min(int(min_dims["links"] * 1.1), 2000000), + "extra_attribute_values": int(min_dims["extra_attribute_values"] * 1.5), + } + emmebank.change_dimensions(required_dims) + src_scenario = emmebank.emmebank.scenario(self.config.emme_scenario_id) + for avail_id in range(9999, 1, -1): + if not emmebank.emmebank.scenario(avail_id): + break + self._temp_scenario = emmebank.emmebank.create_scenario(avail_id) + try: + self._temp_scenario.has_traffic_results = ( + src_scenario.has_traffic_results + ) + self._temp_scenario.has_transit_results = ( + src_scenario.has_transit_results + ) + # Load network topology from disk (scenario in emmebank) + # Note: optimization to use get_partial_network, nodes and links + # only (instead of get_network), followed by loading + # only attribute values of interest (get_attribute_values) + # self._network = src_scenario.get_partial_network( + # ["NODE", "LINK"], include_attributes=False + # ) + self._network = src_scenario.get_network() + # Attributes which are used in any step in this component + # If additional attributes are required they must be added + # to this list + used_attributes = { + "NODE": list(ROOT_LEAF_ID_MAP.values()) + + ["@roots", "@leaves", "#node_county", "x", "y"], + "LINK": list(SUBNETWORK_ID_MAP.values()) + ["length"], + "TURN": [], + "TRANSIT_LINE": [], + "TRANSIT_SEGMENT": [], + } + for domain, attrs in used_attributes.items(): + attrs_to_load = [] + # create required attributes in temp scenario and network object + for attr_id in attrs: + if attr_id not in self._network.attributes(domain): + # create attributes which do not exist + self._network.create_attribute(domain, attr_id) + else: + # only load attributes which already exist + attrs_to_load.append(attr_id) + if attr_id.startswith("@"): + self._temp_scenario.create_extra_attribute(domain, attr_id) + if attr_id.startswith("#"): + self._temp_scenario.create_network_field( + domain, attr_id, "STRING" + ) + # load required attribute values from disk to network object + values = src_scenario.get_attribute_values(domain, attrs_to_load) + self._network.set_attribute_values(domain, attrs_to_load, values) + + # delete unused extra attributes/ network field from network object + for attr_id in self._network.attributes(domain): + if attr_id not in attrs and attr_id.startswith(("@", "#")): + self._network.delete_attribute(domain, attr_id) + self._network.publishable = True + yield + finally: + emmebank.emmebank.delete_scenario(self._temp_scenario) + self._network = None + + def _prepare_files(self): + """Clear all output files and write new headers.""" + for skim_spec in self.config.shortest_path_skims: + file_path = self.get_abs_path(skim_spec["output"]) + os.makedirs(os.path.dirname(file_path), exist_ok=True) + with open(file_path, "w", encoding="utf8") as output_file: + pass + # java expects no headers + # from_zone,to_zone,to_zone,shortest_path_generalized_cost,shortest_path_distance_feet + + def _prepare_network(self): + """Setup network modes, link adjustments for walk and bike skims. + + Set link modes for skim root, leaf and access mode combinations. 
+        Delete highway-only links.
+        Add reverse links for walk mode.
+        """
+        network = self._network
+
+        # create reverse link for one-way walk links
+        # also remove walk and bike access from connectors
+        for link in network.links():
+            if link[SUBNETWORK_ID_MAP["walk"]] and not link.reverse_link:
+                reverse = network.create_link(link.j_node, link.i_node, link.modes)
+                reverse.length = link.length
+                reverse.vertices = link.vertices
+                reverse[SUBNETWORK_ID_MAP["walk"]] = 1
+            for attr in ROOT_LEAF_ID_MAP.values():
+                if link.j_node[attr] or link.i_node[attr]:
+                    for access_attr in SUBNETWORK_ID_MAP.values():
+                        link[access_attr] = 0
+
+        # create new modes for each skim: set node TAZ, MAZ, TAP attr to find connectors
+        # note that the TAZ, MAZ, TAP connectors must not have walk or bike access
+        # in order to prevent "shortcutting" via zones in shortest path building (removed above)
+        mode_codes = []
+        for spec in self.config.shortest_path_skims:
+            mode = network.create_mode("AUX_AUTO", network.available_mode_identifier())
+            mode_id_set = {mode.id}
+            mode_codes.append(mode.id)
+            # get network attribute names from parameters
+            root_attr = ROOT_LEAF_ID_MAP[
+                spec["roots"]
+            ]  # TAZ, TAP or MAZ as root (origin)?
+            leaf_attr = ROOT_LEAF_ID_MAP[
+                spec["leaves"]
+            ]  # TAZ, TAP or MAZ as leaf (dest)?
+            network_attr = SUBNETWORK_ID_MAP[spec["mode"]]  # walk or bike mode
+            # define network access and egress to "zones" and subnetwork
+            # by setting the link.modes
+            for link in network.links():
+                if (
+                    link.j_node[leaf_attr]
+                    or link.i_node[root_attr]
+                    or link[network_attr]
+                ):
+                    link.modes |= mode_id_set
+        self._temp_scenario.publish_network(network)
+        return mode_codes
+
+    def _prepare_roots_leaves(
+        self, root_type: str, leaf_type: str, county: str = None
+    ) -> Tuple[List[int], List[int]]:
+        """Set @roots and @leaves values for orig/dest nodes.
+
+        Also return sequence of root and leaf IDs to match index for shortest
+        path numpy array.
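+
+        Roots may be restricted to a single county so the skims can be
+        computed in county-sized batches (see run()).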
+ """ + roots = [] + leaves = [] + for node in self._network.nodes(): + # filter to only origins by county (if used) + if county and node["#node_county"] != county: + node["@roots"] = 0 + else: + node["@roots"] = node[ROOT_LEAF_ID_MAP[root_type]] + node["@leaves"] = node[ROOT_LEAF_ID_MAP[leaf_type]] + if node["@roots"]: + roots.append(int(node["@roots"])) + if node["@leaves"]: + leaves.append(int(node["@leaves"])) + # save root and leaf IDs back to scenario for SP calc + values = self._network.get_attribute_values("NODE", ["@roots", "@leaves"]) + self._temp_scenario.set_attribute_values("NODE", ["@roots", "@leaves"], values) + self.logger.log_time( + f"num roots={len(roots)}, num leaves={len(leaves)}", level="DEBUG" + ) + return roots, leaves + + def _run_shortest_path(self, mode_code: str, max_dist: float) -> NumpyArray: + """Run Emme shortest path tool to get numpy array of distances.""" + shortest_paths = self.controller.emme_manager.tool( + "inro.emme.network_calculation.shortest_path" + ) + num_processors = parse_num_processors( + self.controller.config.emme.num_processors + ) + spec = { + "type": "SHORTEST_PATH", + "modes": [mode_code], + "root_nodes": "@roots", + "leaf_nodes": "@leaves", + "link_cost": "length", + "path_constraints": { + "max_cost": max_dist, + "uturn_allowed": False, + "through_leaves": False, + "through_centroids": False, + "exclude_forbidden_turns": False, + }, + "results": { + "skim_output": { + "format": "OMX", + "return_numpy": True, + "analyses": [ + { + "component": "SHORTEST_PATH_COST", + "operator": "+", + "name": "distance", + "description": "", + }, + ], + } + }, + "performance_settings": { + "number_of_processors": num_processors, + "direction": "AUTO", + "method": "STANDARD", + }, + } + results = shortest_paths(spec, scenario=self._temp_scenario) + return results["distance"] + + def _export_results( + self, + distance_skim: NumpyArray, + output: str, + roots: List[int], + leaves: List[int], + ): + """Export the distance skims for valid root/leaf pairs to csv.""" + # get the sequence of root / leaf (orig / dest) IDs + root_ids = repeat(roots, len(leaves)) + leaf_ids = leaves * len(roots) + distances = pd.DataFrame( + { + "root_ids": root_ids, + "leaf_ids": leaf_ids, + "leaf_ids_2": leaf_ids, + "dist": distance_skim.flatten(), + "dist_feet": distance_skim.flatten() * 5280, + } + ) + # convert node id to sequential (1-based) zone id + # consistent with tm2.1 - java expects this + zone_seq_file = self.get_abs_path(self.controller.config.scenario.zone_seq_file) + zone_seq_df = pd.read_csv(zone_seq_file) + taz_seq = dict( + zip( + zone_seq_df[zone_seq_df.TAZSEQ > 0].N, + zone_seq_df[zone_seq_df.TAZSEQ > 0].TAZSEQ, + ) + ) + maz_seq = dict( + zip( + zone_seq_df[zone_seq_df.MAZSEQ > 0].N, + zone_seq_df[zone_seq_df.MAZSEQ > 0].MAZSEQ, + ) + ) + tap_seq = dict( + zip( + zone_seq_df[zone_seq_df.TAPSEQ > 0].N, + zone_seq_df[zone_seq_df.TAPSEQ > 0].TAPSEQ, + ) + ) + ext_seq = dict( + zip( + zone_seq_df[zone_seq_df.EXTSEQ > 0].N, + zone_seq_df[zone_seq_df.EXTSEQ > 0].EXTSEQ, + ) + ) + taz_seq = {**taz_seq, **ext_seq} + for c in ["root_ids", "leaf_ids", "leaf_ids_2"]: + taz_bool = distances[c].isin(list(taz_seq.keys())) + maz_bool = distances[c].isin(list(maz_seq.keys())) + tap_bool = distances[c].isin(list(tap_seq.keys())) + if taz_bool.any(): + distances[c] = distances[c].map(taz_seq) + continue + elif maz_bool.any(): + distances[c] = distances[c].map(maz_seq) + continue + elif tap_bool.any(): + distances[c] = distances[c].map(tap_seq) + continue + else: + raise 
Exception(
+                    "{} has N values not in the {} file".format(c, zone_seq_file)
+                )
+        # drop 0's / 1e20
+        distances = distances.query("dist > 0 & dist < 1e19")
+        # write remaining values to text file (append)
+        with open(
+            self.get_abs_path(output), "a", newline="", encoding="utf8"
+        ) as output_file:
+            distances.to_csv(
+                output_file, header=False, index=False, float_format="%.5f"
+            )
diff --git a/tm2py/components/network/create_tod_scenarios.py b/tm2py/components/network/create_tod_scenarios.py
new file mode 100644
index 00000000..79a542f5
--- /dev/null
+++ b/tm2py/components/network/create_tod_scenarios.py
@@ -0,0 +1,635 @@
+"""Create time-of-day (TOD) scenarios for the highway and transit networks."""
+
+import os
+from collections import defaultdict as _defaultdict
+from contextlib import contextmanager as _context
+from typing import TYPE_CHECKING, Any, Dict, Tuple, Union
+
+from tm2py.components.component import Component
+from tm2py.logger import LogStartEnd
+from tm2py.tools import SpatialGridIndex
+
+if TYPE_CHECKING:
+    from tm2py.controller import RunController
+
+_crs_wkt = """PROJCS["NAD83(HARN) / California zone 6 (ftUS)",GEOGCS["NAD83(HARN)",
+DATUM["NAD83_High_Accuracy_Reference_Network",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],
+TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6152"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",
+0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4152"]],PROJECTION["Lambert_Conformal_Conic_2SP"],
+PARAMETER["standard_parallel_1",33.88333333333333],PARAMETER["standard_parallel_2",32.78333333333333],
+PARAMETER["latitude_of_origin",32.16666666666666],PARAMETER["central_meridian",-116.25],PARAMETER["false_easting",
+6561666.667],PARAMETER["false_northing",1640416.667],UNIT["US survey foot",0.3048006096012192,AUTHORITY["EPSG",
+"9003"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","2875"]] """
+
+
+class CreateTODScenarios(Component):
+    """Create per-time-period Emme scenarios for the highway and transit networks."""
+
+    def __init__(self, controller: "RunController"):
+        """Constructor for CreateTODScenarios.
+
+        Args:
+            controller: parent Controller object
+        """
+        super().__init__(controller)
+        self._emme_manager = None
+        self._ref_auto_network = None
+
+    def validate_inputs(self):
+        """Validate the inputs."""
+        # TODO
+
+    def run(self):
+        """Create the time-of-day scenarios."""
+        # project_path = self.get_abs_path(self.controller.config.emme.project_path)
+        # self._emme_manager = self.controller.emme_manager
+        # emme_app = self._emme_manager.project(project_path)
+        # self._emme_manager.init_modeller(emme_app)
+        with self._setup():
+            # self._create_highway_scenarios()
+            self._create_transit_scenarios()
+
+    @_context
+    def _setup(self):
+        self._ref_auto_network = None
+        try:
+            yield
+        finally:
+            self._ref_auto_network = None
+
+    def _project_coordinates(self, ref_scenario):
+        modeller = self.controller.emme_manager.modeller
+        project_coord = modeller.tool(
+            "inro.emme.data.network.base.project_network_coordinates"
+        )
+
+        project_path = self.get_abs_path(self.controller.config.emme.project_path)
+        project_root = os.path.dirname(project_path)
+        emme_app = self.controller.emme_manager.project(project_path)
+        src_prj_file = emme_app.project.spatial_reference_file
+        if not src_prj_file:
+            raise Exception(
+                "Emme network coordinate reference system is not specified, unable to project coordinates for "
+                "area type calculation. Set correct Spatial Reference in Emme Project settings -> GIS."
+ ) + with open(src_prj_file, "r") as src_prj: + current_wkt = src_prj.read() + if current_wkt != _crs_wkt: + dst_prj_file = os.path.join( + project_root, "Media", "NAD83(HARN) California zone 6 (ftUS).prj" + ) + with open(dst_prj_file, "w") as dst_prj: + dst_prj.write(_crs_wkt) + project_coord( + from_scenario=ref_scenario, + from_proj_file=src_prj_file, + to_proj_file=dst_prj_file, + overwrite=True, + ) + emme_app.project.spatial_reference.file_path = dst_prj_file + emme_app.project.save() + + @LogStartEnd("Create highway time of day scenarios.") + def _create_highway_scenarios(self): + emmebank = self.controller.emme_manager.highway_emmebank.emmebank + ref_scenario = emmebank.scenario( + self.controller.config.emme.all_day_scenario_id + ) + self._ref_auto_network = ref_scenario.get_network() + n_time_periods = len(self.controller.config.time_periods) + self.controller.emme_manager.highway_emmebank.change_dimensions( + { + "scenarios": 1 + n_time_periods, + "full_matrices": 9999, + "extra_attribute_values": 60000000, + } + ) + # create VDFs & set cross-reference function parameters + emmebank.extra_function_parameters.el1 = "@free_flow_time" + emmebank.extra_function_parameters.el2 = "@capacity" + emmebank.extra_function_parameters.el3 = "@ja" + emmebank.extra_function_parameters.el4 = "@static_rel" + reliability_tmplt = ( + "* (1 + el4 + " + "( {factor[LOS_C]} * ( put(get(1).min.1.5) - {threshold[LOS_C]} + 0.01 ) ) * (get(1) .gt. {threshold[LOS_C]})" + "+ ( {factor[LOS_D]} * ( get(2) - {threshold[LOS_D]} + 0.01 ) ) * (get(1) .gt. {threshold[LOS_D]})" + "+ ( {factor[LOS_E]} * ( get(2) - {threshold[LOS_E]} + 0.01 ) ) * (get(1) .gt. {threshold[LOS_E]})" + "+ ( {factor[LOS_FL]} * ( get(2) - {threshold[LOS_FL]} + 0.01 ) ) * (get(1) .gt. {threshold[LOS_FL]})" + "+ ( {factor[LOS_FH]} * ( get(2) - {threshold[LOS_FH]} + 0.01 ) ) * (get(1) .gt. {threshold[LOS_FH]})" + ")" + ) + parameters = { + "freeway": { + "factor": { + "LOS_C": 0.2429, + "LOS_D": 0.1705, + "LOS_E": -0.2278, + "LOS_FL": -0.1983, + "LOS_FH": 1.022, + }, + "threshold": { + "LOS_C": 0.7, + "LOS_D": 0.8, + "LOS_E": 0.9, + "LOS_FL": 1.0, + "LOS_FH": 1.2, + }, + }, + "road": { # for arterials, ramps, collectors, local roads, etc. 
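+                # assumed reading: each LOS band's factor applies once the
+                # congestion measure exceeds the matching threshold in
+                # reliability_tmplt above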
+ "factor": { + "LOS_C": 0.1561, + "LOS_D": 0.0, + "LOS_E": 0.0, + "LOS_FL": -0.449, + "LOS_FH": 0.0, + }, + "threshold": { + "LOS_C": 0.7, + "LOS_D": 0.8, + "LOS_E": 0.9, + "LOS_FL": 1.0, + "LOS_FH": 1.2, + }, + }, + } + # TODO: should have just 3 functions, and map the FT to the vdf + # TODO: could optimize expression (to review) + bpr_tmplt = "el1 * (1 + 0.20 * ((volau + volad)/el2/0.75)^6)" + # "el1 * (1 + 0.20 * put(put((volau + volad)/el2/0.75))*get(1))*get(2)*get(2)" + fixed_tmplt = "el1" + akcelik_tmplt = ( + "(el1 + 60 * (0.25 *((volau + volad)/el2 - 1 + " + "(((volau + volad)/el2 - 1)^2 + el3 * (volau + volad)/el2)^0.5)))" + # "(el1 + 60 * (0.25 *(put(put((volau + volad)/el2) - 1) + " + # "(((get(2)*get(2) + (16 * el3 * get(1)^0.5))))" + ) + for f_id in ["fd1", "fd2"]: + if emmebank.function(f_id): + emmebank.delete_function(f_id) + emmebank.create_function( + f_id, bpr_tmplt + reliability_tmplt.format(**parameters["freeway"]) + ) + for f_id in [ + "fd3", + "fd4", + "fd5", + "fd6", + "fd7", + "fd9", + "fd10", + "fd11", + "fd12", + "fd13", + "fd14", + "fd99", + ]: + if emmebank.function(f_id): + emmebank.delete_function(f_id) + emmebank.create_function( + f_id, akcelik_tmplt + reliability_tmplt.format(**parameters["road"]) + ) + if emmebank.function("fd8"): + emmebank.delete_function("fd8") + emmebank.create_function("fd8", fixed_tmplt) + + ref_scenario = emmebank.scenario( + self.controller.config.emme.all_day_scenario_id + ) + attributes = { + "LINK": ["@area_type", "@capclass", "@free_flow_speed", "@free_flow_time"] + } + for domain, attrs in attributes.items(): + for name in attrs: + if ref_scenario.extra_attribute(name) is None: + ref_scenario.create_extra_attribute(domain, name) + + network = ref_scenario.get_network() + self._set_area_type(network) + self._set_capclass(network) + self._set_speed(network) + ref_scenario.publish_network(network) + self._ref_auto_network = network + + self._prepare_scenarios_and_attributes(emmebank) + + @LogStartEnd("Create transit time of day scenarios.") + def _create_transit_scenarios(self): + with self.logger.log_start_end("prepare base scenario"): + emmebank = self.controller.emme_manager.transit_emmebank.emmebank + n_time_periods = len(self.controller.config.time_periods) + required_dims = { + "full_matrices": 9999, + "scenarios": 1 + n_time_periods, + "regular_nodes": 650000, + "links": 1900000, + "transit_vehicles": 600, # pnr vechiles + "transit_segments": 1800000, + "extra_attribute_values": 200000000, + } + self.controller.emme_manager.transit_emmebank.change_dimensions( + required_dims + ) + for ident in ["ft1", "ft2", "ft3"]: + if emmebank.function(ident): + emmebank.delete_function(ident) + # for zero-cost links + emmebank.create_function("ft1", "0") + # segment travel time pre-calculated and stored in data1 (copied from @trantime_seg) + emmebank.create_function("ft2", "us1") + + ref_scenario = emmebank.scenario( + self.controller.config.emme.all_day_scenario_id + ) + attributes = { + "LINK": [ + "@trantime", + "@area_type", + "@capclass", + "@free_flow_speed", + "@free_flow_time", + "@drive_toll" + ], + "TRANSIT_LINE": [ + "@invehicle_factor", + "@iboard_penalty", + "@xboard_penalty", + "@orig_hdw" + ], + "NODE": [ + "@hdw_fraction", + "@wait_pfactor", + "@xboard_nodepen" + ] + } + for domain, attrs in attributes.items(): + for name in attrs: + if ref_scenario.extra_attribute(name) is None: + ref_scenario.create_extra_attribute(domain, name) + network = ref_scenario.get_network() + # auto_network = self._ref_auto_network + # # 
+            # # copy link attributes from auto network to transit network
+            # link_lookup = {}
+            # for link in auto_network.links():
+            #     link_lookup[link["#link_id"]] = link
+            # for link in network.links():
+            #     auto_link = link_lookup.get(link["#link_id"])
+            #     if not auto_link:
+            #         continue
+            #     for attr in [
+            #         "@area_type",
+            #         "@capclass",
+            #         "@free_flow_speed",
+            #         "@free_flow_time",
+            #     ]:
+            #         link[attr] = auto_link[attr]
+
+            mode_table = self.controller.config.transit.modes
+            in_vehicle_factors = {}
+            initial_boarding_penalty = {}
+            transfer_boarding_penalty = {}
+            headway_fraction = {}
+            transfer_wait_perception_factor = {}
+
+            default_in_vehicle_factor = self.controller.config.transit.get(
+                "in_vehicle_perception_factor", 1.0
+            )
+            default_initial_boarding_penalty = self.controller.config.transit.get(
+                "initial_boarding_penalty", 10
+            )
+            default_transfer_boarding_penalty = self.controller.config.transit.get(
+                "transfer_boarding_penalty", 10
+            )
+            default_headway_fraction = self.controller.config.transit.get(
+                "headway_fraction", 0.5
+            )
+            default_transfer_wait_perception_factor = self.controller.config.transit.get(
+                "transfer_wait_perception_factor", 1
+            )
+            walk_perception_factor = self.controller.config.transit.get(
+                "walk_perception_factor", 2
+            )
+            walk_perception_factor_cbd = self.controller.config.transit.get(
+                "walk_perception_factor_cbd", 1
+            )
+            drive_perception_factor = self.controller.config.transit.get(
+                "drive_perception_factor", 2
+            )
+            # walk_modes = set()
+            # access_modes = set()
+            # egress_modes = set()
+            # local_modes = set()
+            # premium_modes = set()
+            for mode_data in mode_table:
+                mode = network.mode(mode_data["mode_id"])
+                if mode is None:
+                    mode = network.create_mode(
+                        mode_data["assign_type"], mode_data["mode_id"]
+                    )
+                elif mode.type != mode_data["assign_type"]:
+                    raise Exception(
+                        f"mode {mode_data['mode_id']} already exists with type "
+                        f"{mode.type} instead of {mode_data['assign_type']}"
+                    )
+                mode.description = mode_data["name"]
+                if mode_data['assign_type'] == "AUX_TRANSIT":
+                    if mode_data['type'] == "DRIVE":
+                        mode.speed = "ul1*%s" % drive_perception_factor
+                    else:
+                        mode.speed = mode_data['speed_or_time_factor']
+                # if mode_data["assign_type"] == "AUX_TRANSIT":
+                #     mode.speed = mode_data["speed_miles_per_hour"]
+                # if mode_data["type"] == "WALK":
+                #     walk_modes.add(mode.id)
+                # elif mode_data["type"] == "ACCESS":
+                #     access_modes.add(mode.id)
+                # elif mode_data["type"] == "EGRESS":
+                #     egress_modes.add(mode.id)
+                # elif mode_data["type"] == "LOCAL":
+                #     local_modes.add(mode.id)
+                # elif mode_data["type"] == "PREMIUM":
+                #     premium_modes.add(mode.id)
+                in_vehicle_factors[mode.id] = mode_data.get(
+                    "in_vehicle_perception_factor", default_in_vehicle_factor)
+                initial_boarding_penalty[mode.id] = mode_data.get(
+                    "initial_boarding_penalty", default_initial_boarding_penalty)
+                transfer_boarding_penalty[mode.id] = mode_data.get(
+                    "transfer_boarding_penalty", default_transfer_boarding_penalty)
+                headway_fraction[mode.id] = mode_data.get(
+                    "headway_fraction", default_headway_fraction)
+                transfer_wait_perception_factor[mode.id] = mode_data.get(
+                    "transfer_wait_perception_factor", default_transfer_wait_perception_factor)
+
+            # create vehicles
+            # vehicle_table = self.controller.config.transit.vehicles
+            # for veh_data in vehicle_table:
+            #     vehicle = network.transit_vehicle(veh_data["vehicle_id"])
+            #     if vehicle is None:
+            #         vehicle = network.create_transit_vehicle(
+            #             veh_data["vehicle_id"], veh_data["mode"]
+            #         )
+            #     elif vehicle.mode.id != veh_data["mode"]:
+            #         raise 
Exception( + # f"vehicle {veh_data['vehicle_id']} already exists with mode {vehicle.mode.id} instead of {veh_data['mode']}" + # ) + # vehicle.auto_equivalent = veh_data["auto_equivalent"] + # vehicle.seated_capacity = veh_data["seated_capacity"] + # vehicle.total_capacity = veh_data["total_capacity"] + + # set fixed guideway times, and initial free flow auto link times + # TODO: cntype_speed_map to config + cntype_speed_map = { + "CRAIL": 45.0, + "HRAIL": 40.0, + "LRAIL": 30.0, + "FERRY": 15.0, + } + walk_speed = self.controller.config.transit.get( + "walk_speed", 3.0 + ) + transit_speed = self.controller.config.transit.get( + "transit_speed", 30.0 + ) + for link in network.links(): + speed = cntype_speed_map.get(link["#cntype"]) + if speed is None: + # speed = link["@free_flow_speed"] + speed = 30.0 # temp fix, will uncomment it when bring in highway changes + if link["@ft"] == 1 and speed > 0: + link["@trantime"] = 60 * link.length / speed + elif speed > 0: + link["@trantime"] = ( + 60 * link.length / speed + link.length * 5 * 0.33 + ) + else: + link["@trantime"] = 0 + else: + link["@trantime"] = 60 * link.length / speed + link.data1 = link["@trantime"] + # # set TAP connector distance to 60 feet + # if link.i_node.is_centroid or link.j_node.is_centroid: + # link.length = 0.01 # 60.0 / 5280.0 + for line in network.transit_lines(): + # TODO: may want to set transit line speeds (not necessarily used in the assignment though) + line_veh = network.transit_vehicle(line["#vehtype"]) # use #vehtype here instead of #mode (#vehtype is vehtype_num in Lasso\mtc_data\lookups\transitSeatCap.csv) + if line_veh is None: + raise Exception( + f"line {line.id} requires vehicle ('#vehtype') {line['#vehtype']} which does not exist" + ) + line_mode = line_veh.mode.id + for seg in line.segments(): + seg.link.modes |= {line_mode} + line.vehicle = line_veh + # Set the perception factor from the mode table + line["@invehicle_factor"] = in_vehicle_factors[line.vehicle.mode.id] + line["@iboard_penalty"] = initial_boarding_penalty[line.vehicle.mode.id] + line["@xboard_penalty"] = transfer_boarding_penalty[line.vehicle.mode.id] + + # # set link modes to the minimum set + # auto_mode = {self.controller.config.highway.generic_highway_mode_code} + # for link in network.links(): + # # get used transit modes on link + # modes = {seg.line.mode for seg in link.segments()} + # # add in available modes based on link type + # if link["@drive_link"]: + # modes |= local_modes + # modes |= auto_mode + # if link["@bus_only"]: + # modes |= local_modes + # if link["@rail_link"] and not modes: + # modes |= premium_modes + # # add access, egress or walk mode (auxilary transit modes) + # if link.i_node.is_centroid: + # modes |= egress_modes + # elif link.j_node.is_centroid: + # modes |= access_modes + # elif link["@walk_link"]: + # modes |= walk_modes + # if not modes: # in case link is unused, give it the auto mode + # link.modes = auto_mode + # else: + # link.modes = modes + for link in network.links(): + # set default values + link.i_node['@hdw_fraction'] = default_headway_fraction + link.i_node['@wait_pfactor'] = default_transfer_wait_perception_factor + link.i_node['@xboard_nodepen'] = 1 + link.j_node['@hdw_fraction'] = default_headway_fraction + link.j_node['@wait_pfactor'] = default_transfer_wait_perception_factor + link.j_node['@xboard_nodepen'] = 1 + # update modes on connectors + if (link.i_node.is_centroid) and (link["@drive_link"]==0): + link.modes = "a" + elif (link.j_node.is_centroid) and (link["@drive_link"]==0): + 
link.modes = "e" + elif (link.i_node.is_centroid or link.j_node.is_centroid ) and (link["@drive_link"]!=0): + link.modes = set([network.mode('c'), network.mode('D')]) + # calculate perceived walk time + # perceived walk time will be used in walk mode definition "ul2", + # link.data1 is used to save congested bus time, so use link.data2 here + if link["@area_type"] in [0,1]: + link.data2 = 60 * link.length / (walk_speed / walk_perception_factor_cbd) + else: + link.data2 = 60 * link.length / (walk_speed / walk_perception_factor) + + # set headway fraction, transfer wait perception and transfer boarding penalty at specific nodes + for line in network.transit_lines(): + if line.vehicle.mode.id == "r": + for seg in line.segments(): + seg.i_node['@hdw_fraction'] = headway_fraction[line.vehicle.mode.id] + seg.j_node['@hdw_fraction'] = headway_fraction[line.vehicle.mode.id] + elif line.vehicle.mode.id == "f": + for seg in line.segments(): + seg.i_node['@hdw_fraction'] = headway_fraction[line.vehicle.mode.id] + seg.i_node['@wait_pfactor'] = transfer_wait_perception_factor[line.vehicle.mode.id] + seg.j_node['@hdw_fraction'] = headway_fraction[line.vehicle.mode.id] + seg.j_node['@wait_pfactor'] = transfer_wait_perception_factor[line.vehicle.mode.id] + elif line.vehicle.mode.id =="h": + for seg in line.segments(): + if seg.i_node['#node_id'] in self.controller.config.transit.timed_transfer_nodes: + seg.i_node['@xboard_nodepen'] = 0 + + ref_scenario.publish_network(network) + + self._prepare_scenarios_and_attributes(emmebank) + + with self.logger.log_start_end("remove transit lines from other periods"): + for period in self.controller.config.time_periods: + period_name = period.name.upper() + with self.logger.log_start_end(f"period {period_name}"): + scenario = emmebank.scenario(period.emme_scenario_id) + network = scenario.get_network() + # removed transit lines from other periods from per-period scenarios + for line in network.transit_lines(): + if line["#time_period"].upper() != period_name: + network.delete_transit_line(line) + scenario.publish_network(network) + + @LogStartEnd("Copy base to period scenarios and set per-period attributes") + def _prepare_scenarios_and_attributes(self, emmebank): + ref_scenario = emmebank.scenario( + self.controller.config.emme.all_day_scenario_id + ) + # self._project_coordinates(ref_scenario) + # find all time-of-day attributes (ends with period name) + tod_attr_groups = { + "NODE": _defaultdict(lambda: []), + "LINK": _defaultdict(lambda: []), + "TURN": _defaultdict(lambda: []), + "TRANSIT_LINE": _defaultdict(lambda: []), + "TRANSIT_SEGMENT": _defaultdict(lambda: []), + } + for attr in ref_scenario.extra_attributes(): + for period in self.controller.config.time_periods: + if attr.name.endswith(period.name): + tod_attr_groups[attr.type][attr.name[: -len(period.name)]].append( + attr.name + ) + for period in self.controller.config.time_periods: + scenario = emmebank.scenario(period.emme_scenario_id) + if scenario: + emmebank.delete_scenario(scenario) + scenario = emmebank.copy_scenario(ref_scenario, period.emme_scenario_id) + scenario.title = f"{period.name} {ref_scenario.title}"[:60] + # in per-period scenario create attributes without period suffix, copy values + # for this period and delete all other period attributes + for domain, all_attrs in tod_attr_groups.items(): + for root_attr, tod_attrs in all_attrs.items(): + src_attr = f"{root_attr}{period.name}" + if root_attr.endswith("_"): + root_attr = root_attr[:-1] + for attr in tod_attrs: + if attr != src_attr: 
+                            scenario.delete_extra_attribute(attr)
+                    attr = scenario.create_extra_attribute(domain, root_attr)
+                    attr.description = scenario.extra_attribute(src_attr).description
+                    values = scenario.get_attribute_values(domain, [src_attr])
+                    scenario.set_attribute_values(domain, [root_attr], values)
+                    scenario.delete_extra_attribute(src_attr)
+
+    def _set_area_type(self, network):
+        # set area type for links based on average density of MAZ closest to I or J node
+        # the average density including all MAZs within the specified buffer distance
+        buff_dist = 5280 * self.controller.config.highway.area_type_buffer_dist_miles
+        maz_data_file_path = self.get_abs_path(
+            self.controller.config.scenario.maz_landuse_file
+        )
+        maz_landuse_data: Dict[
+            int, Dict[Any, Union[str, int, Tuple[float, float]]]
+        ] = {}
+        with open(maz_data_file_path, "r") as maz_data_file:
+            header = [h.strip() for h in next(maz_data_file).split(",")]
+            for line in maz_data_file:
+                data = dict(zip(header, line.split(",")))
+                maz_landuse_data[int(data["MAZ_ORIGINAL"])] = data
+        # Build spatial index of MAZ node coords
+        sp_index_maz = SpatialGridIndex(size=0.5 * 5280)
+        for node in network.nodes():
+            if node["@maz_id"]:
+                x, y = node.x, node.y
+                maz_landuse_data[int(node["@maz_id"])]["coords"] = (x, y)
+                sp_index_maz.insert(int(node["@maz_id"]), x, y)
+        for maz_landuse in maz_landuse_data.values():
+            x, y = maz_landuse.get("coords", (None, None))
+            if x is None:
+                continue  # some MAZs in table might not be in network
+            # Find all MAZs within the square buffer (including this one)
+            # (note: square buffer instead of radius used to match earlier implementation)
+            other_maz_ids = sp_index_maz.within_square(x, y, buff_dist)
+            # Sum total landuse attributes within buffer distance
+            total_pop = sum(
+                int(maz_landuse_data[maz_id]["POP"]) for maz_id in other_maz_ids
+            )
+            total_emp = sum(
+                int(maz_landuse_data[maz_id]["emp_total"]) for maz_id in other_maz_ids
+            )
+            total_acres = sum(
+                float(maz_landuse_data[maz_id]["ACRES"]) for maz_id in other_maz_ids
+            )
+            # calculate buffer area type
+            if total_acres > 0:
+                density = (1 * total_pop + 2.5 * total_emp) / total_acres
+            else:
+                density = 0
+            # code area type class
+            if density < 6:
+                maz_landuse["area_type"] = 5  # rural
+            elif density < 30:
+                maz_landuse["area_type"] = 4  # suburban
+            elif density < 55:
+                maz_landuse["area_type"] = 3  # urban
+            elif density < 100:
+                maz_landuse["area_type"] = 2  # urban business
+            elif density < 300:
+                maz_landuse["area_type"] = 1  # cbd
+            else:
+                maz_landuse["area_type"] = 0  # regional core
+        # Find nearest MAZ for each link, take min area type of i or j node
+        for link in network.links():
+            i_node, j_node = link.i_node, link.j_node
+            a_maz = sp_index_maz.nearest(i_node.x, i_node.y)
+            b_maz = sp_index_maz.nearest(j_node.x, j_node.y)
+            link["@area_type"] = min(
+                maz_landuse_data[a_maz]["area_type"],
+                maz_landuse_data[b_maz]["area_type"],
+            )
+
+    @staticmethod
+    def _set_capclass(network):
+        for link in network.links():
+            area_type = link["@area_type"]
+            if area_type < 0:
+                link["@capclass"] = -1
+            elif (link["@ft"] == 99) & (link["@assignable"] == 1):
+                link["@capclass"] = 10 * area_type + 7
+            else:
+                link["@capclass"] = 10 * area_type + link["@ft"]
+
+    def _set_speed(self, network):
+        free_flow_speed_map = {}
+        for row in self.controller.config.highway.capclass_lookup:
+            if row.get("free_flow_speed") is not None:
+                free_flow_speed_map[row["capclass"]] = row.get("free_flow_speed")
+        for link in network.links():
+            # default speed of 25 mph if missing or 0 in the capclass lookup table
+            link["@free_flow_speed"] = free_flow_speed_map.get(link["@capclass"], 25)
+            speed = link["@free_flow_speed"] or 25
+            link["@free_flow_time"] = 60 * link.length / speed
diff --git a/tm2py/components/network/highway/__init__.py b/tm2py/components/network/highway/__init__.py
index e69de29b..e0eeaa17 100644
--- a/tm2py/components/network/highway/__init__.py
+++ b/tm2py/components/network/highway/__init__.py
@@ -0,0 +1 @@
+"""Highway network module."""
diff --git a/tm2py/components/network/highway/drive_access_skims.py b/tm2py/components/network/highway/drive_access_skims.py
new file mode 100644
index 00000000..59eb82b1
--- /dev/null
+++ b/tm2py/components/network/highway/drive_access_skims.py
@@ -0,0 +1,276 @@
+"""Module containing the drive access skims component."""
+
+import os
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+
+from tm2py.components.component import Component
+from tm2py.config import TimePeriodConfig
+from tm2py.emme.matrix import OMXManager
+from tm2py.logger import LogStartEnd
+
+if TYPE_CHECKING:
+    from tm2py.controller import RunController
+
+
+MODE_NAME_MAP = {
+    "b": "LOCAL_BUS",
+    "f": "FERRY_SERVICE",
+    "h": "HEAVY_RAIL",
+    "l": "LIGHT_RAIL",
+    "r": "COMMUTER_RAIL",
+    "x": "EXPRESS_BUS",
+}
+
+
+class DriveAccessSkims(Component):
+    """Joins the highway skims with the nearest TAP to support drive access to transit.
+
+    The procedure is:
+        Get closest maz (walk distance) for each TAP, from active mode skims
+        Get taz corresponding to that maz (maz-taz lookup table)
+        for each time period
+            for each TAZ
+                for each mode
+                    find closest TAP (smallest gen cost+MAZ walk access, ignores transit time)
+                    write row of FTAZ,MODE,PERIOD,TTAP,TMAZ,TTAZ,DTIME,DDIST,DTOLL,WDIST
+    """
+
+    @LogStartEnd()
+    def run(self):
+        """Write the closest-TAP drive access rows for each time period."""
+        results_path = self._init_results_file()
+        maz_taz = self._maz_taz_correspondence()
+        ped_dist = self._get_ped_dist()
+        maz_taz_tap = maz_taz.merge(ped_dist, on="TMAZ")
+        for period in self.controller.config.time_periods:
+            tap_modes = self._get_tap_modes(period)
+            # convert TAP node id to 1-based sequential ID
+            zone_seq_file = self.get_abs_path(
+                self.controller.config.scenario.zone_seq_file
+            )
+            zone_seq_df = pd.read_csv(zone_seq_file)
+            tap_seq = dict(
+                zip(
+                    zone_seq_df[zone_seq_df.TAPSEQ > 0].N,
+                    zone_seq_df[zone_seq_df.TAPSEQ > 0].TAPSEQ,
+                )
+            )
+            tap_modes["TTAP"] = tap_modes["TTAP"].map(tap_seq)
+            maz_ttaz_tap_modes = maz_taz_tap.merge(tap_modes, on="TTAP")
+            drive_costs = self._get_drive_costs(period)
+            taz_seq = dict(
+                zip(
+                    zone_seq_df[zone_seq_df.TAZSEQ > 0].N,
+                    zone_seq_df[zone_seq_df.TAZSEQ > 0].TAZSEQ,
+                )
+            )
+            drive_costs["TTAZ"] = drive_costs["TTAZ"].map(taz_seq)
+            drive_costs["FTAZ"] = drive_costs["FTAZ"].map(taz_seq)
+            taz_to_tap_costs = drive_costs.merge(maz_ttaz_tap_modes, on="TTAZ")
+            closest_taps = self._get_closest_taps(taz_to_tap_costs, period)
+            with open(results_path, "a", newline="", encoding="utf8") as output_file:
+                closest_taps.to_csv(
+                    output_file, header=False, index=False, float_format="%.5f"
+                )
+
+    def validate_inputs(self):
+        """Validate inputs files are correct, raise if an error is found."""
+        # TODO
+        pass
+
+    def _init_results_file(self) -> str:
+        """Initialize results file and write the header row."""
+        output_file_path = self.get_abs_path(
+            self.controller.config.highway.drive_access_output_skim_path
+        )
+        os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
+        with open(output_file_path, "w", encoding="utf8") as output_file:
+            output_file.write(
"FTAZ,MODE,PERIOD,TTAP,TMAZ,TTAZ,DTIME,DDIST,DTOLL,WDIST\n" + ) + return output_file_path + + def _maz_taz_correspondence(self) -> pd.DataFrame: + """Load maz data (landuse file) which has the MAZ-> TAZ correspondence""" + maz_data_file = self.get_abs_path( + self.controller.config.scenario.maz_landuse_file + ) + maz_input_data = pd.read_csv(maz_data_file) + # drop the other landuse columns + + # disable no-member error as Pandas returns either a parser object or a dataframe + # depending upon the inputs to pd.read_csv, and in this case we get a + # dataframe so in fact it has .columns + # set the maz sequence numbers + # pylint: disable=E1101 + maz_taz_mapping = maz_input_data.drop( + columns=list(set(maz_input_data.columns) - {"MAZ_ORIGINAL", "TAZ_ORIGINAL"}) + ) + maz_taz_mapping["TMAZ"] = maz_taz_mapping.index + 1 + # Get taz seq numbers + taz_ids = maz_taz_mapping["TAZ_ORIGINAL"].unique() + taz_ids.sort() + taz_seq = list(range(1, len(taz_ids) + 1)) + taz_seq_mapping = pd.DataFrame({"TAZ_ORIGINAL": taz_ids, "TTAZ": taz_seq}) + # Merge them back to get a table with the MAZ sequence and TAZ sequence (TMAZ and TTAZ) + maz_taz = maz_taz_mapping.merge(taz_seq_mapping, on="TAZ_ORIGINAL") + maz_taz.drop(columns=["MAZ_ORIGINAL", "TAZ_ORIGINAL"], inplace=True) + return maz_taz + + def _get_ped_dist(self) -> pd.DataFrame: + """Get walk distance from closest maz to tap""" + # Load the shortest distance skims from the active mode skims results + for skim_spec in self.controller.config.active_modes.shortest_path_skims: + if ( + skim_spec.mode == "walk" + and skim_spec.roots == "MAZ" + and skim_spec.leaves == "TAP" + ): + ped_skim_path = skim_spec["output"] + break + else: + raise Exception( + "No skim mode of WALK: MAZ->MAZ in active_modes.shortest_path_skims" + ) + ped_dist = pd.read_csv( + self.get_abs_path(ped_skim_path), + names=["TMAZ", "TTAP", "TTAP_2", "WDIST_MILE", "WDIST"], + header=None, + ) + ped_dist = ped_dist[["TMAZ", "TTAP", "WDIST"]] + # Get closest MAZ to each TAZ + # disable no-member error as Pandas returns either a parser object or a dataframe + # depending upon the inputs to pd.read_csv, and in this case we get a + # dataframe so in fact it has .sort_values + # pylint: disable=E1101 + ped_dist.sort_values(["TTAP", "WDIST"], inplace=True) + ped_dist.drop_duplicates("TTAP", inplace=True) + return ped_dist + + def _get_tap_modes(self, period: TimePeriodConfig) -> pd.DataFrame: + """Get the set of modes available from each TAP.""" + emmebank = self.controller.emme_manager.transit_emmebank.emmebank + # load Emme network for TAP<->available modes correspondence + scenario = emmebank.scenario(period.emme_scenario_id) + attrs_to_load = { + "NODE": ["@tap_id"], + "TRANSIT_LINE": [], + "TRANSIT_SEGMENT": ["allow_alightings", "allow_boardings"], + } + if self.controller.config.transit.use_fares: + attrs_to_load["TRANSIT_LINE"].append("#src_mode") + + def process_stops(stops): + modes = set() + for stop in stops: + for seg in stop.outgoing_segments(include_hidden=True): + if seg.allow_alightings or seg.allow_boardings: + modes.add(MODE_NAME_MAP[seg.line["#src_mode"]]) + return modes + + else: + + def process_stops(stops): + modes = set() + for stop in stops: + for seg in stop.outgoing_segments(include_hidden=True): + if seg.allow_alightings or seg.allow_boardings: + modes.add(MODE_NAME_MAP[seg.line.mode.id]) + return modes + + network = self.controller.emme_manager.get_network(scenario, attrs_to_load) + tap_ids = [] + tap_mode_ids = [] + for node in network.nodes(): + if node["@tap_id"] 
+                continue
+            stops = set([])
+            for link in node.outgoing_links():
+                for next_link in link.j_node.outgoing_links():
+                    stops.add(next_link.j_node)
+            modes = process_stops(stops)
+            if modes:
+                tap_mode_ids.extend(modes)
+                tap_ids.extend([node["@tap_id"]] * len(modes))
+        tap_modes = pd.DataFrame({"TTAP": tap_ids, "MODE": tap_mode_ids})
+        return tap_modes
+
+    def _get_drive_costs(self, period: TimePeriodConfig) -> pd.DataFrame:
+        """Load the drive costs from OMX matrix files, return as pandas dataframe."""
+        emmebank = self.controller.emme_manager.highway_emmebank.emmebank
+        scenario = emmebank.scenario(period.emme_scenario_id)
+        zone_numbers = scenario.zone_numbers
+        network = self.controller.emme_manager.get_network(
+            scenario, {"NODE": ["#node_county", "@taz_id"]}
+        )
+        externals = [
+            n["@taz_id"]
+            for n in network.nodes()
+            if n["@taz_id"] > 0 and n["#node_county"] == "External"
+        ]
+        root_ids = np.repeat(zone_numbers, len(zone_numbers))
+        leaf_ids = zone_numbers * len(zone_numbers)
+
+        skim_src_file = self.get_abs_path(
+            self.controller.config.highway.output_skim_path
+            / self.controller.config.highway.output_skim_filename_tmpl.format(
+                time_period=period.name
+            )
+        )
+        with OMXManager(skim_src_file, "r") as src_file:
+            drive_costs = pd.DataFrame(
+                {
+                    "FTAZ": root_ids,
+                    "TTAZ": leaf_ids,
+                    "DDIST": src_file.read(f"{period.name.upper()}_da_dist").flatten(),
+                    "DTOLL": src_file.read(
+                        f"{period.name.upper()}_da_bridgetoll_da"
+                    ).flatten(),
+                    "DTIME": src_file.read(f"{period.name.upper()}_da_time").flatten(),
+                }
+            )
+            src_file.close()
+        # drop externals
+        drive_costs = drive_costs[~drive_costs["FTAZ"].isin(externals)]
+        drive_costs = drive_costs[~drive_costs["TTAZ"].isin(externals)]
+        # drop inaccessible zones
+        drive_costs = drive_costs.query("DTIME > 0 & DTIME < 1e19")
+        return drive_costs
+
+    @staticmethod
+    def _get_closest_taps(
+        taz_to_tap_costs: pd.DataFrame, period: TimePeriodConfig
+    ) -> pd.DataFrame:
+        """Calculate the TAZ -> TAP drive cost, and get the closest TAP for each TAZ."""
+        # cost = time + vot * (dist * auto_op_cost + toll)
+        value_of_time = 18.93  # $ / hr
+        operating_cost_per_mile = 17.23  # cents / mile
+        auto_op_cost = operating_cost_per_mile  # / 5280 # correct for feet
+        vot = 0.6 / value_of_time  # turn into minutes / cents
+        walk_speed = 60.0 / 3.0  # minutes per mile (3 mph walk speed)
+        taz_to_tap_costs["COST"] = (
+            taz_to_tap_costs["DTIME"]
+            + walk_speed * (taz_to_tap_costs["WDIST"] / 5280)
+            + vot
+            * (auto_op_cost * taz_to_tap_costs["DDIST"] + taz_to_tap_costs["DTOLL"])
+        )
+        # sort by mode, from taz and gen cost to get the closest TTAP to each FTAZ
+        # for each mode, then drop subsequent rows
+        closest_tap_costs = taz_to_tap_costs.sort_values(["MODE", "FTAZ", "COST"])
+        closest_tap_costs.drop_duplicates(["MODE", "FTAZ"], inplace=True)
+        closest_tap_costs["PERIOD"] = period.name
+        columns = [
+            "FTAZ",
+            "MODE",
+            "PERIOD",
+            "TTAP",
+            "TMAZ",
+            "TTAZ",
+            "DTIME",
+            "DDIST",
+            "DTOLL",
+            "WDIST",
+        ]
+        return closest_tap_costs[columns]
diff --git a/tm2py/components/network/highway/highway_assign.py b/tm2py/components/network/highway/highway_assign.py
index b4cbeb8a..16fe772f 100644
--- a/tm2py/components/network/highway/highway_assign.py
+++ b/tm2py/components/network/highway/highway_assign.py
@@ -1,57 +1,57 @@
 """Highway assignment and skim component.
 
 Performs equilibrium traffic assignment and generates resulting skims.
-
 The assignment is configured using the "highway" table in the source config.
 See the config documentation for details.
 The traffic assignment runs according to the list of assignment classes under highway.classes.
-Other relevant parameters from the config are
-    emme.num_processors: number of processors as integer or "MAX" or "MAX-N"
-    time_periods[].emme_scenario_id: Emme scenario number to use for each period
-    time_periods[].highway_capacity_factor
+Other relevant parameters from the config are:
+- emme.num_processors: number of processors as integer or "MAX" or "MAX-N"
+- time_periods[].emme_scenario_id: Emme scenario number to use for each period
+- time_periods[].highway_capacity_factor
 
 The Emme network must have the following attributes available:
-    Link:
-    - "length" in feet
-    - "vdf", volume delay function (volume delay functions must also be setup)
-    - "@useclass", vehicle-class restrictions classification, auto-only, HOV only
-    - "@free_flow_time", the free flow time (in minutes)
-    - "@tollXX_YY", the toll for period XX and class subgroup (see truck
-        class) named YY, used together with @tollbooth to generate @bridgetoll_YY
-        and @valuetoll_YY
-    - "@maz_flow", the background traffic MAZ-to-MAZ SP assigned flow from highway_maz,
-        if controller.iteration > 0
-    - modes: must be set on links and match the specified mode codes in
-        the traffic config
-
-    Network results:
-    - @flow_XX: link PCE flows per class, where XX is the class name in the config
-    - timau: auto travel time
-    - volau: total assigned flow in PCE
-
-    Notes:
-    - Output matrices are in miles, minutes, and cents (2010 dollars) and are stored/
-        as real values;
-    - Intrazonal distance/time is one half the distance/time to the nearest neighbor;
-    - Intrazonal bridge and value tolls are assumed to be zero
+Link - attributes:
+- "length" in feet
+- "vdf", volume delay function (volume delay functions must also be setup)
+- "@useclass", vehicle-class restrictions classification, auto-only, HOV only
+- "@free_flow_time", the free flow time (in minutes)
+- "@tollXX_YY", the toll for period XX and class subgroup (see truck
+  class) named YY, used together with @tollbooth to generate @bridgetoll_YY
+  and @valuetoll_YY
+- "@maz_flow", the background traffic MAZ-to-MAZ SP assigned flow from highway_maz,
+  if controller.iteration > 0
+- modes: must be set on links and match the specified mode codes in
+  the traffic config
+
+Network results - attributes:
+- @flow_XX: link PCE flows per class, where XX is the class name in the config
+- timau: auto travel time
+- volau: total assigned flow in PCE
+
+Notes:
+- Output matrices are in miles, minutes, and cents (2010 dollars) and are stored
+  as real values;
+- Intrazonal distance/time is one half the distance/time to the nearest neighbor;
+- Intrazonal bridge and value tolls are assumed to be zero
 """
 
 from __future__ import annotations
-from contextlib import contextmanager as _context
+
 import os
-from typing import Dict, Union, List, TYPE_CHECKING
+from contextlib import contextmanager as _context
+from typing import TYPE_CHECKING, Dict, List, Union
 
 import numpy as np
 
+from tm2py import tools
 from tm2py.components.component import Component
-from tm2py.components.demand.demand import PrepareHighwayDemand
+from tm2py.components.demand.prepare_demand import PrepareHighwayDemand
 from tm2py.emme.manager import EmmeScenario
 from tm2py.emme.matrix import MatrixCache, OMXManager
 from tm2py.emme.network import NetworkCalculator
 from tm2py.logger import LogStartEnd
-from tm2py import tools
 
 if TYPE_CHECKING:
     from tm2py.controller import RunController
@@ -89,34 +89,68 @@ class HighwayAssignment(Component):
""" def __init__(self, controller: RunController): + """Constructor for HighwayAssignment components. + + Args: + controller (RunController): Reference to current run controller. + """ super().__init__(controller) - self._num_processors = tools.parse_num_processors( - self.config.emme.num_processors - ) + + self.config = self.controller.config.highway + self._matrix_cache = None self._skim_matrices = [] + self._class_config = None + self._scenario = None + self._highway_emmebank = None + + @property + def highway_emmebank(self): + if not self._highway_emmebank: + self._highway_emmebank = self.controller.emme_manager.highway_emmebank + return self._highway_emmebank + + @property + def classes(self): + # self.hwy_classes + return [c.name for c in self.config.classes] + + @property + def class_config(self): + # self.hwy_class_configs + if not self._class_config: + self._class_config = {c.name: c for c in self.config.classes} + + return self._class_config + + def validate_inputs(self): + """Validate inputs files are correct, raise if an error is found.""" + # TODO + pass @LogStartEnd("Highway assignment and skims", level="STATUS") def run(self): - """Run highway assignment""" + """Run highway assignment.""" demand = PrepareHighwayDemand(self.controller) - demand.run() - for time in self.time_period_names(): - scenario = self.get_emme_scenario( - self.config.emme.highway_database_path, time - ) + if self.controller.iteration >= 0: + demand.run() + else: + self.highway_emmebank.zero_matrix + for time in self.time_period_names: + scenario = self.highway_emmebank.scenario(time) with self._setup(scenario, time): iteration = self.controller.iteration assign_classes = [ - AssignmentClass(c, time, iteration) - for c in self.config.highway.classes + AssignmentClass(c, time, iteration) for c in self.config.classes ] if iteration > 0: self._copy_maz_flow(scenario) else: self._reset_background_traffic(scenario) self._create_skim_matrices(scenario, assign_classes) - assign_spec = self._get_assignment_spec(assign_classes) + assign_spec = self._get_assignment_spec( + assign_classes, path_analysis=False + ) # self.logger.log_dict(assign_spec, level="DEBUG") with self.logger.log_start_end( "Run SOLA assignment with path analyses", level="INFO" @@ -126,21 +160,63 @@ def run(self): ) assign(assign_spec, scenario, chart_log_interval=1) + # calucaltes link level LOS based reliability + net_calc = NetworkCalculator(self.controller, scenario) + + exf_pars = scenario.emmebank.extra_function_parameters + vdfs = [ + f for f in scenario.emmebank.functions() if f.type == "VOLUME_DELAY" + ] + for function in vdfs: + expression = function.expression + for el in ["el1", "el2", "el3", "el4"]: + expression = expression.replace(el, getattr(exf_pars, el)) + if "@static_rel" in expression: + # split function into time component and reliability component + time_expr, reliability_expr = expression.split( + "*(1+@static_rel+" + ) + net_calc( + "@auto_time", + time_expr, + {"link": "vdf=%s" % function.id[2:]}, + ) + net_calc( + "@reliability", + "(@static_rel+" + reliability_expr, + {"link": "vdf=%s" % function.id[2:]}, + ) + net_calc("@reliability_sq", "@reliability**2", {"link": "all"}) + + assign_spec = self._get_assignment_spec( + assign_classes, path_analysis=True + ) + with self.logger.log_start_end( + "Run SOLA assignment with path analyses and highway reliability", + level="INFO", + ): + assign = self.controller.emme_manager.tool( + "inro.emme.traffic_assignment.sola_traffic_assignment" + ) + assign(assign_spec, scenario, 
chart_log_interval=1) + # Subtract non-time costs from gen cost to get the raw travel time for emme_class_spec in assign_spec["classes"]: self._calc_time_skim(emme_class_spec) # Set intra-zonal for time and dist to be 1/2 nearest neighbour - for class_config in self.config.highway.classes: + for class_config in self.config.classes: self._set_intrazonal_values( time, class_config["name"], class_config["skims"], ) self._export_skims(scenario, time) + if self.logger.debug_enabled: + self._log_debug_report(scenario, time) @_context def _setup(self, scenario: EmmeScenario, time_period: str): - """Setup and teardown for Emme Matrix cache and list of skim matrices + """Setup and teardown for Emme Matrix cache and list of skim matrices. Args: scenario: Emme scenario object @@ -161,22 +237,24 @@ def _copy_maz_flow(self, scenario: EmmeScenario): """Copy maz_flow from MAZ demand assignment to ul1 for background traffic. Args: - scenario: Emme scenario object""" - self.logger.log_time( + scenario: Emme scenario object + """ + self.logger.log( "Copy @maz_flow to ul1 for background traffic", indent=True, level="DETAIL" ) - net_calc = NetworkCalculator(scenario) + net_calc = NetworkCalculator(self.controller, scenario) net_calc("ul1", "@maz_flow") def _reset_background_traffic(self, scenario: EmmeScenario): - """Set ul1 for background traffic to 0 (no maz-maz flow) + """Set ul1 for background traffic to 0 (no maz-maz flow). Args: - scenario: Emme scenario object""" - self.logger.log_time( + scenario: Emme scenario object + """ + self.logger.log( "Set ul1 to 0 for background traffic", indent=True, level="DETAIL" ) - net_calc = NetworkCalculator(scenario) + net_calc = NetworkCalculator(self.controller, scenario) net_calc("ul1", "0") def _create_skim_matrices( @@ -202,16 +280,15 @@ def _create_skim_matrices( matrix = create_matrix( "mf", matrix_name, scenario=scenario, overwrite=True ) - self.logger.log( - f"Create matrix name: {matrix_name}, id: {matrix.id}", - level="DEBUG", + self.logger.debug( + f"Create matrix name: {matrix_name}, id: {matrix.id}" ) self._skim_matrices.append(matrix) def _get_assignment_spec( - self, assign_classes: List[AssignmentClass] + self, assign_classes: List[AssignmentClass], path_analysis=True ) -> EmmeTrafficAssignmentSpec: - """Generate template Emme SOLA assignment specification + """Generate template Emme SOLA assignment specification. 
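+
+        Setting path_analysis=False builds the class specs without path
+        analyses (emme_highway_class_spec_wo_pa), used for the first
+        assignment pass before the reliability calculations.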
Args: assign_classes: list of AssignmentClass objects @@ -220,8 +297,8 @@ def _get_assignment_spec( Emme specification for SOLA traffic assignment """ - relative_gap = self.config.highway.relative_gap - max_iterations = self.config.highway.max_iterations + relative_gap = self.config.relative_gap + max_iterations = self.config.max_iterations # NOTE: mazmazvol as background traffic in link.data1 ("ul1") base_spec = { "type": "SOLA_TRAFFIC_ASSIGNMENT", @@ -237,8 +314,14 @@ def _get_assignment_spec( "relative_gap": relative_gap, "normalized_gap": 0.0, }, - "performance_settings": {"number_of_processors": self._num_processors}, + "performance_settings": { + "number_of_processors": self.controller.num_processors + }, } + if not path_analysis: + base_spec["classes"] = [ + klass.emme_highway_class_spec_wo_pa for klass in assign_classes + ] return base_spec def _calc_time_skim(self, emme_class_spec: EmmeHighwayClassSpec): @@ -271,8 +354,9 @@ def _set_intrazonal_values( skims: list of requested skims (from config) """ for skim_name in skims: - matrix_name = f"mf{time_period}_{class_name}_{skim_name}" - if skim_name in ["time", "distance", "freeflowtime", "hovdist", "tolldist"]: + if skim_name in ["time", "dist", "freeflowtime", "hovdist", "tolldist"]: + matrix_name = f"mf{time_period}_{class_name}_{skim_name}" + self.logger.debug(f"Setting intrazonals to 0.5*min for {matrix_name}") data = self._matrix_cache.get_data(matrix_name) # NOTE: sets values for external zones as well np.fill_diagonal(data, np.inf) @@ -287,8 +371,17 @@ def _export_skims(self, scenario: EmmeScenario, time_period: str): time_period: time period name """ # NOTE: skims in separate file by period + self.logger.debug( + "_export_skims: self.config.output_skim_path:{}".format( + self.config.output_skim_path + ) + ) omx_file_path = self.get_abs_path( - self.config.highway.output_skim_path.format(period=time_period) + self.config.output_skim_path + / self.config.output_skim_filename_tmpl.format(time_period=time_period) + ) + self.logger.debug( + f"export {len(self._skim_matrices)} skim matrices to {omx_file_path}" ) os.makedirs(os.path.dirname(omx_file_path), exist_ok=True) with OMXManager( @@ -296,11 +389,38 @@ def _export_skims(self, scenario: EmmeScenario, time_period: str): ) as omx_file: omx_file.write_matrices(self._skim_matrices) + def _log_debug_report(self, scenario: EmmeScenario, time_period: str): + num_zones = len(scenario.zone_numbers) + num_cells = num_zones * num_zones + self.logger.debug(f"Highway skim summary for period {time_period}") + self.logger.debug( + f"Number of zones: {num_zones}. Number of O-D pairs: {num_cells}. " + "Values outside -9999999, 9999999 are masked in summaries." + ) + self.logger.debug( + "name min max mean sum" + ) + for matrix in self._skim_matrices: + values = self._matrix_cache.get_data(matrix) + data = np.ma.masked_outside(values, -9999999, 9999999) + stats = ( + f"{matrix.name:25} {data.min():9.4g} {data.max():9.4g} " + f"{data.mean():9.4g} {data.sum(): 13.7g}" + ) + self.logger.debug(stats) + class AssignmentClass: - """Highway assignment class, represents data from config and conversion to Emme specs""" + """Highway assignment class, represents data from config and conversion to Emme specs.""" def __init__(self, class_config, time_period, iteration): + """Constructor of Highway Assignment class. 
+ + Args: + class_config (_type_): _description_ + time_period (_type_): _description_ + iteration (_type_): _description_ + """ self.class_config = class_config self.time_period = time_period self.iteration = iteration @@ -309,7 +429,7 @@ def __init__(self, class_config, time_period, iteration): @property def emme_highway_class_spec(self) -> EmmeHighwayClassSpec: - """Construct and return Emme traffic assignment class specification + """Construct and return Emme traffic assignment class specification. Converted from input config (highway.classes), see Emme Help for SOLA traffic assignment for specification details. @@ -341,6 +461,39 @@ class specification used in the SOLA assignment. } return class_spec + @property + def emme_highway_class_spec_wo_pa(self) -> EmmeHighwayClassSpec: + """Construct and return Emme traffic assignment class specification. + + Converted from input config (highway.classes), see Emme Help for + SOLA traffic assignment for specification details. + Adds time_period as part of demand and skim matrix names. + + Returns: + A nested dictionary corresponding to the expected Emme traffic + class specification used in the SOLA assignment. + """ + if self.iteration == 0: + demand_matrix = 'ms"zero"' + else: + demand_matrix = f'mf"{self.time_period}_{self.name}"' + class_spec = { + "mode": self.class_config.mode_code, + "demand": demand_matrix, + "generalized_cost": { + "link_costs": f"@cost_{self.name.lower()}", # cost in $0.01 + # $/hr -> min/$0.01 + "perception_factor": 0.6 / self.class_config.value_of_time, + }, + "results": { + "link_volumes": f"@flow_{self.name.lower()}", + "od_travel_times": { + "shortest_paths": f"mf{self.time_period}_{self.name}_time" + }, + }, + } + return class_spec + @property def emme_class_analysis(self) -> List[EmmeHighwayAnalysisSpec]: """Construct and return a list of path analyses specs which generate the required skims. 
@@ -362,9 +515,10 @@ def emme_class_analysis(self) -> List[EmmeHighwayAnalysisSpec]: continue if "_" in skim_type: skim_type, group = skim_type.split("_") + matrix_name = f"mf{self.time_period}_{self.name}_{skim_type}_{group}" else: group = "" - matrix_name = f"mf{self.time_period}_{self.name}_{skim_type}{group}" + matrix_name = f"mf{self.time_period}_{self.name}_{skim_type}" class_analysis.append( self.emme_analysis_spec( self.skim_analysis_link_attribute(skim_type, group), @@ -389,9 +543,12 @@ def skim_matrices(self) -> List[str]: continue if "_" in skim_type: skim_type, group = skim_type.split("_") + skim_matrices.append( + f"{self.time_period}_{self.name}_{skim_type}_{group}" + ) else: group = "" - skim_matrices.append(f"{self.time_period}_{self.name}_{skim_type}{group}") + skim_matrices.append(f"{self.time_period}_{self.name}_{skim_type}") return skim_matrices @staticmethod @@ -446,5 +603,7 @@ def skim_analysis_link_attribute(skim: str, group: str) -> str: "freeflowtime": "@free_flow_time", "bridgetoll": f"@bridgetoll_{group}", "valuetoll": f"@valuetoll_{group}", + "rlbty": "@reliability_sq", + "autotime": "@auto_time", } return lookup[skim] diff --git a/tm2py/components/network/highway/highway_maz.py b/tm2py/components/network/highway/highway_maz.py index 21437d87..cc74fdd1 100644 --- a/tm2py/components/network/highway/highway_maz.py +++ b/tm2py/components/network/highway/highway_maz.py @@ -28,23 +28,20 @@ from __future__ import annotations import array as _array +import os from collections import defaultdict as _defaultdict from contextlib import contextmanager as _context from math import sqrt as _sqrt -import os -from typing import Dict, List, Union, BinaryIO, TYPE_CHECKING +from typing import TYPE_CHECKING, BinaryIO, Dict, List, Union import numpy as np import pandas as pd -# from tables import NoSuchNodeError - from tm2py.components.component import Component from tm2py.emme.manager import EmmeNode from tm2py.emme.matrix import OMXManager -from tm2py.emme.network import NetworkCalculator -from tm2py.logger import LogStartEnd -from tm2py.tools import parse_num_processors + +# from tables import NoSuchNodeError if TYPE_CHECKING: from tm2py.controller import RunController @@ -72,53 +69,71 @@ def __init__(self, controller: RunController): Args: controller: parent Controller object """ + super().__init__(controller) - self._scenario = None + self.config = self.controller.config.highway.maz_to_maz + self._debug = False + # bins: performance parameter: crow-fly distance bins # to limit shortest path calculation by origin to furthest destination # semi-exposed for performance testing self._bin_edges = _default_bin_edges - self._debug = False - # Internal attributes to track data through the sequence of steps + # Lazily-loaded Emme Properties + self._highway_emmebank = None self._eb_dir = None + + # Internal attributes to track data through the sequence of steps + self._scenario = None self._mazs = None - self._demand = None + self._demand = _defaultdict(lambda: []) self._max_dist = 0 self._network = None self._root_index = None self._leaf_index = None + @property + def highway_emmebank(self): + if self._highway_emmebank is None: + self._highway_emmebank = self.controller.emme_manager.highway_emmebank + return self._highway_emmebank + + @property + def eb_dir(self): + if self._eb_dir is None: + self._eb_dir = os.path.dirname(self.highway_emmebank.path) + return self._eb_dir + + def validate_inputs(self): + """Validate inputs files are correct, raise if an error is found.""" + # TODO + 
pass + @LogStartEnd() def run(self): """Run MAZ-to-MAZ shortest path assignment.""" - emme_manager = self.controller.emme_manager - emmebank = emme_manager.emmebank( - self.get_abs_path(self.config.emme.highway_database_path) - ) - self._eb_dir = os.path.dirname(emmebank.path) + county_groups = {} - for group in self.config.highway.maz_to_maz.demand_county_groups: + for group in self.config.demand_county_groups: county_groups[group.number] = group.counties - for time in self.time_period_names(): - with self.logger.log_start_end(f"period {time}"): - self._scenario = self.get_emme_scenario(emmebank.path, time) - with self._setup(time): - self._prepare_network() - for i, names in county_groups.items(): - maz_ids = self._get_county_mazs(names) - if len(maz_ids) == 0: - self.logger.log( - f"warning: no mazs for counties {', '.join(names)}" - ) - continue - self._process_demand(time, i, maz_ids) - demand_bins = self._group_demand() - for i, demand_group in enumerate(demand_bins): - self._find_roots_and_leaves(demand_group["demand"]) - self._set_link_cost_maz() - self._run_shortest_path(time, i, demand_group["dist"]) - self._assign_flow(time, i, demand_group["demand"]) + for time in self.time_period_names: + self._scenario = self.highway_emmebank.scenario(time) + with self._setup(time): + self._prepare_network() + for i, names in county_groups.items(): + maz_ids = self._get_county_mazs(names) + if len(maz_ids) == 0: + self.logger.log( + f"warning: no mazs for counties {', '.join(names)}" + ) + continue + self._process_demand(time, i, maz_ids) + demand_bins = self._group_demand() + for i, demand_group in enumerate(demand_bins): + self._find_roots_and_leaves(demand_group["demand"]) + self._set_link_cost_maz() + self._run_shortest_path(time, i, demand_group["dist"]) + self._assign_flow(time, i, demand_group["demand"]) @_context def _setup(self, time: str): @@ -139,11 +154,16 @@ def _setup(self, time: str): ("NODE", "@maz_root", "Flag for MAZs which are roots"), ("NODE", "@maz_leaf", "Flag for MAZs which are leaves"), ] + for domain, name, desc in attributes: + self.logger.log(f"Create temp {domain} attr: {name}, {desc}", level="TRACE") with self.controller.emme_manager.temp_attributes_and_restore( self._scenario, attributes ): try: - yield + with self.logger.log_start_end( + f"MAZ assign for period {time} scenario {self._scenario}" + ): + yield finally: if not self._debug: self._mazs = None @@ -153,9 +173,7 @@ def _setup(self, time: str): self._leaf_index = None # delete sp path files for bin_no in range(len(self._bin_edges)): - file_path = os.path.join( - self._eb_dir, f"sp_{time}_{bin_no}.ebp" - ) + file_path = os.path.join(self.eb_dir, f"sp_{time}_{bin_no}.ebp") if os.path.exists(file_path): os.remove(file_path) @@ -171,10 +189,14 @@ def _prepare_network(self): else: time_attr = "@free_flow_time" self.logger.log(f"Calculating link costs using time {time_attr}", level="DEBUG") - vot = self.config.highway.maz_to_maz.value_of_time - op_cost = self.config.highway.maz_to_maz.operating_cost_per_mile - net_calc = NetworkCalculator(self._scenario) - net_calc("@link_cost", f"{time_attr} + 0.6 / {vot} * (length * {op_cost})") + vot = self.config.value_of_time + op_cost = self.config.operating_cost_per_mile + net_calc = NetworkCalculator(self.controller, self._scenario) + report = net_calc( + "@link_cost", f"{time_attr} + 0.6 / {vot} * (length * {op_cost})" + ) + self.logger.log("Link cost calculation report", level="TRACE") + self.logger.log_dict(report, level="TRACE") self._network = 
self.controller.emme_manager.get_network(
            self._scenario, {"NODE": ["@maz_id", "x", "y", "#node_county"], "LINK": []}
        )
@@ -193,16 +215,38 @@ def _get_county_mazs(self, counties: List[str]) -> List[EmmeNode]:
         Returns:
             List of MAZ nodes (Emme Node) which are in these counties.
         """
+        self.logger.log(
+            f"Processing county MAZs for {', '.join(counties)}", level="DETAIL"
+        )
         network = self._network
+        # maz data
+        # maz_file = self.get_abs_path(self.controller.config.scenario.maz_landuse_file)
+        # maz_df = pd.read_csv(maz_file)
+        # maz_county_dict = dict(zip(maz_df["MAZ_ORIGINAL"], maz_df["CountyName"]))
         # NOTE: every maz must have a valid #node_county
         if self._mazs is None:
             self._mazs = _defaultdict(lambda: [])
             for node in network.nodes():
                 if node["@maz_id"]:
+                    # self._mazs[maz_county_dict[node["@maz_id"]]].append(node)
                     self._mazs[node["#node_county"]].append(node)
         mazs = []
         for county in counties:
             mazs.extend(self._mazs[county])
+        # the highway emme network does not include the 5 inaccessible MAZs,
+        # but the trip table is indexed by the full MAZ list
+        # https://app.asana.com/0/12291104512575/1199091221400653/f
+        if "San Francisco" in counties:
+            mazs.extend(
+                [
+                    {"@maz_id": 10186},
+                    {"@maz_id": 16084},
+                    {"@maz_id": 111432},
+                    {"@maz_id": 111433},
+                ]
+            )
+        if "Contra Costa" in counties:
+            mazs.extend([{"@maz_id": 411178}])
+        self.logger.log(f"Num MAZs {len(mazs)}", level="DEBUG")
         return sorted(mazs, key=lambda n: n["@maz_id"])

     def _process_demand(self, time: str, index: int, maz_ids: List[EmmeNode]):
@@ -218,27 +262,58 @@ def _process_demand(self, time: str, index: int, maz_ids: List[EmmeNode]):
             maz_ids: indexed list of MAZ ID nodes for the county group
                 (active counties for this demand file)
         """
+        self.logger.log(
+            f"Process demand for time period {time} index {index}", level="DETAIL"
+        )
         data = self._read_demand_array(time, index)
         origins, destinations = data.nonzero()
+        self.logger.log(
+            f"non-zero origins {len(origins)} destinations {len(destinations)}",
+            level="DEBUG",
+        )
+        total_demand = 0
         for orig, dest in zip(origins, destinations):
             # skip intra-maz demand
             if orig == dest:
                 continue
+            # an index beyond the network MAZ list indicates a county name
+            # mismatch between the network #node_county and the MAZ SE data
+            if orig > len(maz_ids) - 1:
+                self.logger.log(
+                    f"Demand index {orig} is beyond the network MAZ list; check that "
+                    "#node_county matches the county names in the input MAZ SE data.",
+                    level="DEBUG",
+                )
+                continue
+            if dest > len(maz_ids) - 1:
+                self.logger.log(
+                    f"Demand index {dest} is beyond the network MAZ list; check that "
+                    "#node_county matches the county names in the input MAZ SE data.",
+                    level="DEBUG",
+                )
+                continue
             orig_node = maz_ids[orig]
             dest_node = maz_ids[dest]
             dist = _sqrt(
                 (dest_node.x - orig_node.x) ** 2 + (dest_node.y - orig_node.y) ** 2
             )
+            if (dist / 5280) > self.config.max_distance:
+                self.logger.log(
+                    f"MAZ demand from {orig} to {dest} is over "
+                    f"{self.config.max_distance} miles, do not assign",
+                    level="DEBUG",
+                )
+                continue
             if dist > self._max_dist:
                 self._max_dist = dist
+            demand = data[orig][dest]
+            total_demand += demand
             self._demand[orig_node].append(
                 {
                     "orig": orig_node,
                     "dest": dest_node,
-                    "dem": data[orig][dest],
+                    "dem": demand,
                     "dist": dist,
                 }
             )
+        self.logger.log(f"Max distance found {self._max_dist}", level="DEBUG")
+        self.logger.log(f"Total inter-zonal demand {total_demand}", level="DEBUG")

     def _read_demand_array(self, time: str, index: int) -> NumpyArray:
         """Load the demand from file with the specified time and index name. 
@@ -247,25 +322,30 @@ def _read_demand_array(self, time: str, index: int) -> NumpyArray: time: time period name index: group index of the demand file, used to find the file by name """ - file_path_tmplt = self.get_abs_path(self.config.highway.maz_to_maz.demand_file) + file_path_tmplt = self.get_abs_path(self.config.demand_file) omx_file_path = self.get_abs_path( - file_path_tmplt.format(period=time, number=index) + file_path_tmplt.format( + period=time, number=index, iter=self.controller.iteration + ) ) + self.logger.log(f"Reading demand from {omx_file_path}", level="DEBUG") with OMXManager(omx_file_path, "r") as omx_file: - demand_array = omx_file.read("M0") + demand_array = omx_file.read(f"MAZ_AUTO_{index}_{time}") + omx_file.close() return demand_array def _group_demand( self, ) -> List[Dict[str, Union[float, List[Dict[str, Union[float, EmmeNode]]]]]]: - """Process the demand loaded from files and create groups based on the - origin to the furthest destination with demand. + """Process the demand loaded from files \ + and create groups based on the origin to the furthest destination with demand. Returns: List of dictionaries, containing the demand in the format {"orig": EmmeNode, "dest": EmmeNode, "dem": float (demand value)} """ + self.logger.log("Grouping demand in distance buckets", level="DETAIL") # group demand from same origin into distance bins by furthest # distance destination to limit shortest path search radius bin_edges = self._bin_edges[:] @@ -282,7 +362,7 @@ def _group_demand( group["demand"].extend(data) break for group in demand_groups: - self.logger.log_time( + self.logger.log( f"bin dist {group['dist']}, size {len(group['demand'])}", level="DEBUG" ) # Filter out groups without any demand @@ -315,8 +395,8 @@ def _find_roots_and_leaves(self, demand: List[Dict[str, Union[float, EmmeNode]]] leaf_maz_ids[d_node.number] = d_node["@maz_leaf"] = d_node["@maz_id"] self._root_index = {p: i for i, p in enumerate(sorted(root_maz_ids.keys()))} self._leaf_index = {q: i for i, q in enumerate(sorted(leaf_maz_ids.keys()))} - self.controller.emme_manager.copy_attr_values( - "NODE", self._network, self._scenario, ["@maz_root", "@maz_leaf"] + self.controller.emme_manager.copy_attribute_values( + self._network, self._scenario, {"NODE": ["@maz_root", "@maz_leaf"]} ) def _set_link_cost_maz(self): @@ -328,12 +408,13 @@ def _set_link_cost_maz(self): """ # forbid egress from MAZ nodes which are not demand roots / # access to MAZ nodes which are not demand leafs - net_calc = NetworkCalculator(self._scenario) + net_calc = NetworkCalculator(self.controller, self._scenario) net_calc.add_calc("@link_cost_maz", "@link_cost") net_calc.add_calc("@link_cost_maz", "1e20", "@maz_root=0 and !@maz_id=0") net_calc.add_calc("@link_cost_maz", "1e20", "@maz_leafj=0 and !@maz_idj=0") net_calc.run() + @LogStartEnd(level="DETAIL") def _run_shortest_path(self, time: str, bin_no: int, max_radius: float): """Run the shortest path tool to generate paths between the marked nodes. 
@@ -348,10 +429,10 @@ def _run_shortest_path(self, time: str, bin_no: int, max_radius: float): max_radius = max_radius * 5280 + 100 # add some buffer for rounding error ext = "ebp" if _USE_BINARY else "txt" file_name = f"sp_{time}_{bin_no}.{ext}" - num_processors = parse_num_processors(self.config.emme.num_processors) + spec = { "type": "SHORTEST_PATH", - "modes": [self.config.highway.maz_to_maz.mode_code], + "modes": [self.config.mode_code], "root_nodes": "@maz_root", "leaf_nodes": "@maz_leaf", "link_cost": "@link_cost_maz", @@ -371,11 +452,11 @@ def _run_shortest_path(self, time: str, bin_no: int, max_radius: float): }, "path_output": { "format": "BINARY" if _USE_BINARY else "TEXT", - "file": os.path.join(self._eb_dir, file_name), + "file": os.path.join(self.eb_dir, file_name), }, }, "performance_settings": { - "number_of_processors": num_processors, + "number_of_processors": self.controller.num_processors, "direction": "FORWARD", "method": "STANDARD", }, @@ -427,13 +508,15 @@ def _assign_flow_text( link["temp_flow"] += dem i_node = j_node assigned += dem - self.logger.log_time( - f"ASSIGN bin {bin_no}: total: {len(demand)}", level="DEBUG" - ) - self.logger.log_time( + self.logger.log(f"ASSIGN bin {bin_no}: total: {len(demand)}", level="DEBUG") + self.logger.log( f"assigned: {assigned}, not assigned: {not_assigned}", level="DEBUG" ) + self.controller.emme_manager.copy_attribute_values( + self._network, self._scenario, {"LINK": ["temp_flow"]}, {"LINK": ["data1"]} + ) + def _load_text_format_paths( self, time: str, bin_no: int ) -> Dict[int, Dict[int, List[int]]]: @@ -449,7 +532,7 @@ def _load_text_format_paths( """ paths = _defaultdict(lambda: {}) with open( - os.path.join(self._eb_dir, f"sp_{time}_{bin_no}.txt"), + os.path.join(self.eb_dir, f"sp_{time}_{bin_no}.txt"), "r", encoding="utf8", ) as paths_file: @@ -474,7 +557,7 @@ def _assign_flow_binary( {"orig": EmmeNode, "dest": EmmeNode, "dem": float (demand value)} """ file_name = f"sp_{time}_{bin_no}.ebp" - with open(os.path.join(self._eb_dir, file_name), "rb") as paths_file: + with open(os.path.join(self.eb_dir, file_name), "rb") as paths_file: # read set of path pointers by Orig-Dest sequence from file offset, leaves_nb, path_indicies = self._get_path_indices(paths_file) assigned = 0 @@ -494,10 +577,13 @@ def _assign_flow_binary( self._assign_path_flow(paths_file, start, end, data["dem"]) assigned += data["dem"] bytes_read += (end - start) * 4 - self.controller.emme_manager.copy_attr_values( - "LINK", self._network, self._scenario, ["temp_flow"], ["@maz_flow"] + self.controller.emme_manager.copy_attribute_values( + self._network, + self._scenario, + {"LINK": ["temp_flow"]}, + {"LINK": ["@maz_flow"]}, ) - self.logger.log_time( + self.logger.log( f"ASSIGN bin {bin_no}, total {len(demand)}, assign " f"{assigned}, not assign {not_assigned}, bytes {bytes_read}", level="DEBUG", @@ -571,7 +657,7 @@ def _assign_path_flow( # load sequence of Node IDs which define the path (L=32-bit unsigned integers) path = _array.array("L") path.fromfile(paths_file, end - start) - # proccess path to sequence of links and add flow + # process path to sequence of links and add flow path_iter = iter(path) i_node = next(path_iter) for j_node in path_iter: @@ -581,16 +667,37 @@ def _assign_path_flow( class SkimMAZCosts(Component): - """MAZ-to-MAZ shortest-path skim of time, distance and toll""" + """MAZ-to-MAZ shortest-path skim of time, distance and toll.""" def __init__(self, controller: RunController): - """MAZ-to-MAZ shortest-path skim of time, distance and 
toll + """MAZ-to-MAZ shortest-path skim of time, distance and toll. + Args: controller: parent RunController object """ super().__init__(controller) + self.config = self.controller.config.highway.maz_to_maz + # TODO add config requirement that most be a valid time period self._scenario = None self._network = None + self._highway_emmebank = None + + @property + def highway_emmebank(self): + if self._highway_emmebank is None: + self._highway_emmebank = self.controller.emme_manager.highway_emmebank + return self._highway_emmebank + + @property + def scenario(self): + if self._scenario is None: + self._scenario = self.highway_emmebank.scenario(self.config.skim_period) + return self._scenario + + def validate_inputs(self): + """Validate inputs files are correct, raise if an error is found.""" + # TODO + pass @LogStartEnd() def run(self): @@ -615,31 +722,15 @@ def run(self): operating_cost_per_mile: auto operating cost max_skim_cost: max cost value used to limit the shortest path search mode_code: - - config.emme.num_processors - - """ - ref_period = None - ref_period_name = self.config.highway.maz_to_maz.skim_period - for period in self.config.time_periods: - if period.name == ref_period_name: - ref_period = period - break - if ref_period is None: - raise Exception( - "highway.maz_to_maz.skim_period: is not the name of an existing time_period" - ) - self._scenario = self.get_emme_scenario( - self.config.emme.highway_database_path, ref_period.name - ) + # prepare output file and write header - output = self.get_abs_path(self.config.highway.maz_to_maz.output_skim_file) + output = self.get_abs_path(self.config.output_skim_file) os.makedirs(os.path.dirname(output), exist_ok=True) with open(output, "w", encoding="utf8") as output_file: output_file.write("FROM_ZONE, TO_ZONE, COST, DISTANCE, BRIDGETOLL\n") counties = [] - for group in self.config.highway.maz_to_maz.demand_county_groups: + for group in self.config.demand_county_groups: counties.extend(group.counties) with self._setup(): self._prepare_network() @@ -658,26 +749,27 @@ def _setup(self): ("NODE", "@maz_root", "selected roots (origins)"), ] with self.controller.emme_manager.temp_attributes_and_restore( - self._scenario, attributes + self.scenario, attributes ): try: yield finally: self._network = None # clear network obj ref to free memory - @LogStartEnd() + @LogStartEnd(level="DEBUG") def _prepare_network(self): - """Calculates the link cost in @link_cost and loads the network to self._network""" - net_calc = NetworkCalculator(self._scenario) + """Calculates the link cost in @link_cost and loads the network to self._network.""" + net_calc = NetworkCalculator(self.controller, self._scenario) if self._scenario.has_traffic_results: time_attr = "(@free_flow_time.max.timau)" else: time_attr = "@free_flow_time" - vot = self.config.highway.maz_to_maz.value_of_time - op_cost = self.config.highway.maz_to_maz.operating_cost_per_mile + self.logger.log(f"Time attribute {time_attr}", level="DEBUG") + vot = self.config.value_of_time + op_cost = self.config.operating_cost_per_mile net_calc("@link_cost", f"{time_attr} + 0.6 / {vot} * (length * {op_cost})") self._network = self.controller.emme_manager.get_network( - self._scenario, {"NODE": ["@maz_id", "#node_county"]} + self.scenario, {"NODE": ["@maz_id", "#node_county"]} ) def _mark_roots(self, county: str) -> int: @@ -690,9 +782,10 @@ def _mark_roots(self, county: str) -> int: else: node["@maz_root"] = 0 values = self._network.get_attribute_values("NODE", ["@maz_root"]) - 
self._scenario.set_attribute_values("NODE", ["@maz_root"], values) + self.scenario.set_attribute_values("NODE", ["@maz_root"], values) return count_roots + @LogStartEnd(level="DETAIL") def _run_shortest_path(self) -> Dict[str, NumpyArray]: """Run shortest paths tool and return dictionary of skim results name, numpy arrays. @@ -706,11 +799,10 @@ def _run_shortest_path(self) -> Dict[str, NumpyArray]: shortest_paths_tool = self.controller.emme_manager.tool( "inro.emme.network_calculation.shortest_path" ) - num_processors = parse_num_processors(self.config.emme.num_processors) - max_cost = float(self.config.highway.maz_to_maz.max_skim_cost) + max_cost = float(self.config.max_skim_cost) spec = { "type": "SHORTEST_PATH", - "modes": [self.config.highway.maz_to_maz.mode_code], + "modes": [self.config.mode_code], "root_nodes": "@maz_root", "leaf_nodes": "@maz_id", "link_cost": "@link_cost", @@ -748,12 +840,12 @@ def _run_shortest_path(self) -> Dict[str, NumpyArray]: } }, "performance_settings": { - "number_of_processors": num_processors, + "number_of_processors": self.controller.num_processors, "direction": "FORWARD", "method": "STANDARD", }, } - sp_values = shortest_paths_tool(spec, self._scenario) + sp_values = shortest_paths_tool(spec, self.scenario) return sp_values def _export_results(self, sp_values: Dict[str, NumpyArray]): @@ -786,6 +878,6 @@ def _export_results(self, sp_values: Dict[str, NumpyArray]): result_df = result_df.query("COST > 0 & COST < 1e19") # write remaining values to text file # FROM_ZONE,TO_ZONE,COST,DISTANCE,BRIDGETOLL - output = self.get_abs_path(self.config.highway.maz_to_maz.output_skim_file) + output = self.get_abs_path(self.config.output_skim_file) with open(output, "a", newline="", encoding="utf8") as output_file: result_df.to_csv(output_file, header=False, index=False) diff --git a/tm2py/components/network/highway/highway_network.py b/tm2py/components/network/highway/highway_network.py index ae1cfec7..f456b604 100644 --- a/tm2py/components/network/highway/highway_network.py +++ b/tm2py/components/network/highway/highway_network.py @@ -18,7 +18,7 @@ toll class values highway.tolls.dst_vehicle_group_names: corresponding names used in network attributes toll classes - highway.tolls.tollbooth_start_index: index to split point bridge tolls + highway.tolls.valuetoll_start_tollbooth_code: index to split point bridge tolls (< this value) from distance value tolls (>= this value) highway.classes: the list of assignment classes, see the notes under highway_assign for detailed explanation @@ -46,27 +46,43 @@ - "@cost_YY": total cost for class YY """ +import heapq as _heapq +import os +from typing import TYPE_CHECKING, Dict, List, Set -from typing import Dict, List, Set +import pandas as pd -from tm2py.components.component import Component +from tm2py.components.component import Component, FileFormatError +from tm2py.emme.manager import EmmeNetwork, EmmeScenario from tm2py.logger import LogStartEnd -from tm2py.emme.manager import EmmeScenario, EmmeNetwork + +if TYPE_CHECKING: + from tm2py.controller import RunController class PrepareNetwork(Component): - """Highway network preparation""" + """Highway network preparation.""" + + def __init__(self, controller: "RunController"): + """Constructor for PPrepareNetwork. + + Args: + controller (RunController): Reference to run controller object. 
+ """ + super().__init__(controller) + self.config = self.controller.config.highway + self._emme_manager = self.controller.emme_manager + self._highway_emmebank = None + self._highway_scenarios = None - @LogStartEnd("prepare network attributes and modes") + @LogStartEnd("Prepare network attributes and modes") def run(self): - """Run network preparation step""" - for time in self.time_period_names(): + """Run network preparation step.""" + for time in self.time_period_names: with self.controller.emme_manager.logbook_trace( f"prepare for highway assignment {time}" ): - scenario = self.get_emme_scenario( - self.config.emme.highway_database_path, time - ) + scenario = self.highway_emmebank.scenario(time) self._create_class_attributes(scenario, time) network = scenario.get_network() self._set_tolls(network, time) @@ -74,8 +90,52 @@ def run(self): self._set_link_modes(network) self._calc_link_skim_lengths(network) self._calc_link_class_costs(network) + self._calc_interchange_distance(network) + self._calc_link_static_reliability(network) scenario.publish_network(network) + @property + def highway_emmebank(self): + if not self._highway_emmebank: + self._highway_emmebank = self.controller.emme_manager.highway_emmebank + return self._highway_emmebank + + @property + def highway_scenarios(self): + if self._highway_scenarios is None: + self._highway_scenarios = { + tp: self.highway_emmebank.scenario(tp) for tp in self.time_period_names + } + return self._highway_scenarios + + def validate_inputs(self): + """Validate inputs files are correct, raise if an error is found.""" + toll_file_path = self.get_abs_path(self.config.tolls.file_path) + if not os.path.exists(toll_file_path): + self.logger.log( + f"Tolls file (config.highway.tolls.file_path) does not exist: {toll_file_path}", + level="ERROR", + ) + raise FileNotFoundError(f"Tolls file does not exist: {toll_file_path}") + src_veh_groups = self.config.tolls.src_vehicle_group_names + columns = ["fac_index"] + for time in self.controller.config.time_periods: + for vehicle in src_veh_groups: + columns.append(f"toll{time.name.lower()}_{vehicle}") + with open(toll_file_path, "r", encoding="UTF8") as toll_file: + header = set(h.strip() for h in next(toll_file).split(",")) + missing = [] + for column in columns: + if column not in header: + missing.append(column) + self.logger.log( + f"Tolls file missing column: {column}", level="ERROR" + ) + if missing: + raise FileFormatError( + f"Tolls file missing {len(missing)} columns: {', '.join(missing)}" + ) + def _create_class_attributes(self, scenario: EmmeScenario, time_period: str): """Create required network attributes including per-class cost and flow attributes.""" create_attribute = self.controller.emme_manager.tool( @@ -88,10 +148,19 @@ def _create_class_attributes(self, scenario: EmmeScenario, time_period: str): ("@maz_flow", "Assigned MAZ-to-MAZ flow"), ("@hov_length", "length with HOV lanes"), ("@toll_length", "length with tolls"), - ] + ("@intdist_down", "dist to the closest d-stream interchange"), + ("@intdist_up", "dist from the closest upstream int"), + ("@static_rel", "static reliability"), + ("@reliability", "link total reliability"), + ("@reliability_sq", "link total reliability variance"), + ("@auto_time", "link total reliability"), + ], + "NODE": [ + ("@interchange", "interchange"), + ], } # toll field attributes by bridge and value and toll definition - dst_veh_groups = self.config.highway.tolls.dst_vehicle_group_names + dst_veh_groups = self.config.tolls.dst_vehicle_group_names for dst_veh in 
         toll_index = self._get_toll_indices()
-        src_veh_groups = self.config.highway.tolls.src_vehicle_group_names
-        dst_veh_groups = self.config.highway.tolls.dst_vehicle_group_names
-        tollbooth_start_index = self.config.highway.tolls.tollbooth_start_index
+        src_veh_groups = self.config.tolls.src_vehicle_group_names
+        dst_veh_groups = self.config.tolls.dst_vehicle_group_names
+        valuetoll_start_tollbooth_code = (
+            self.config.tolls.valuetoll_start_tollbooth_code
+        )
         for link in network.links():
-            if link["@tollbooth"]:
-                index = (
+            # set bridgetoll
+            if (
+                link["@tollbooth"] > 0
+                and link["@tollbooth"] < valuetoll_start_tollbooth_code
+            ):
+                index = int(
                     link["@tollbooth"] * 1000 + link["@tollseg"] * 10 + link["@useclass"]
                 )
                 data_row = toll_index.get(index)
                 if data_row is None:
-                    self.logger.log(
+                    self.logger.warn(
                         f"set tolls failed index lookup {index}, link {link.id}",
-                        level="TRACE",
+                        indent=True,
                     )
                     continue  # tolls will remain at zero
-                # if index is below tollbooth start index then this is a bridge
-                # (point toll), available for all traffic assignment classes
-                if link["@tollbooth"] < tollbooth_start_index:
-                    for src_veh, dst_veh in zip(src_veh_groups, dst_veh_groups):
-                        link[f"@bridgetoll_{dst_veh}"] = (
-                            data_row[f"toll{time_period.lower()}_{src_veh}"] * 100
-                        )
-                else:  # else, this is a tollway with a per-mile charge
-                    for src_veh, dst_veh in zip(src_veh_groups, dst_veh_groups):
-                        link[f"@valuetoll_{dst_veh}"] = (
-                            data_row[f"toll{time_period.lower()}_{src_veh}"]
-                            * link.length
-                            * 100
-                        )
+                for src_veh, dst_veh in zip(src_veh_groups, dst_veh_groups):
+                    link[f"@bridgetoll_{dst_veh}"] = (
+                        float(data_row[f"toll{time_period.lower()}_{src_veh}"]) * 100
+                    )
+            # set valuetoll
+            elif link["@tollbooth"] >= valuetoll_start_tollbooth_code:
+                index = int(
+                    link["@tollbooth"] * 1000 + link["@tollseg"] * 10 + link["@useclass"]
+                )
+                data_row = toll_index.get(index)
+                if data_row is None:
+                    self.logger.warn(
+                        f"set tolls failed index lookup {index}, link {link.id}",
+                        indent=True,
+                    )
+                    continue  # tolls will remain at zero
+                for src_veh, dst_veh in zip(src_veh_groups, dst_veh_groups):
+                    link[f"@valuetoll_{dst_veh}"] = (
+                        float(data_row[f"toll{time_period.lower()}_{src_veh}"])
+                        * link.length
+                        * 100
+                    )
+            else:
+                continue

     def _get_toll_indices(self) -> Dict[int, Dict[str, str]]:
         """Get the mapping of toll lookup table from the toll reference file."""
-        toll_file_path = self.get_abs_path(self.config.highway.tolls.file_path)
+        toll_file_path = self.get_abs_path(self.config.tolls.file_path)
+        self.logger.debug(f"toll_file_path {toll_file_path}", indent=True)
         tolls = {}
         with open(toll_file_path, "r", encoding="UTF8") as toll_file:
-            header = next(toll_file).split(",")
+            header = [h.strip() for h in next(toll_file).split(",")]
             for line in toll_file:
                 data = dict(zip(header, line.split(",")))
                 tolls[int(data["fac_index"])] = data
         return tolls

     def _set_vdf_attributes(self, network: EmmeNetwork, time_period: str):
-        """Set capacity, VDF and critical speed on links"""
+        """Set capacity, 
VDF and critical speed on links.""" capacity_map = {} critical_speed_map = {} - for row in self.config.highway.capclass_lookup: + for row in self.config.capclass_lookup: if row.get("capacity") is not None: capacity_map[row["capclass"]] = row.get("capacity") if row.get("critical_speed") is not None: critical_speed_map[row["capclass"]] = row.get("critical_speed") tp_mapping = { - tp.name: tp.highway_capacity_factor for tp in self.config.time_periods + tp.name.upper(): tp.highway_capacity_factor + for tp in self.controller.config.time_periods } period_capacity_factor = tp_mapping[time_period] akcelik_vdfs = [3, 4, 5, 7, 8, 10, 11, 12, 13, 14] @@ -198,12 +282,11 @@ def _set_link_modes(self, network: EmmeNetwork): """Set the link modes based on the per-class 'excluded_links' set.""" # first reset link modes (script run more than once) # "generic_highway_mode_code" must already be created (in import to Emme script) - auto_mode = {network.mode(self.config.highway.generic_highway_mode_code)} + auto_mode = {network.mode(self.config.generic_highway_mode_code)} used_modes = { - network.mode(assign_class.mode_code) - for assign_class in self.config.highway.classes + network.mode(assign_class.mode_code) for assign_class in self.config.classes } - used_modes.add(network.mode(self.config.highway.maz_to_maz.mode_code)) + used_modes.add(network.mode(self.config.maz_to_maz.mode_code)) for link in network.links(): link.modes -= used_modes if link["@drive_link"]: @@ -214,13 +297,13 @@ def _set_link_modes(self, network: EmmeNetwork): # Create special access/egress mode for MAZ connectors maz_access_mode = network.create_mode( - "AUX_AUTO", self.config.highway.maz_to_maz.mode_code + "AUX_AUTO", self.config.maz_to_maz.mode_code ) maz_access_mode.description = "MAZ access" # create modes from class spec # (duplicate mode codes allowed provided the excluded_links is the same) mode_excluded_links = {} - for assign_class in self.config.highway.classes: + for assign_class in self.config.classes: if assign_class.mode_code in mode_excluded_links: if ( assign_class.excluded_links @@ -238,7 +321,7 @@ def _set_link_modes(self, network: EmmeNetwork): mode.description = assign_class.name mode_excluded_links[mode.id] = assign_class.excluded_links - dst_veh_groups = self.config.highway.tolls.dst_vehicle_group_names + dst_veh_groups = self.config.tolls.dst_vehicle_group_names for link in network.links(): modes = set(m.id for m in link.modes) if link.i_node["@maz_id"] + link.j_node["@maz_id"] > 0: @@ -258,12 +341,12 @@ def _set_link_modes(self, network: EmmeNetwork): link[f"@valuetoll_{dst_veh}"] > 0 ) self._apply_exclusions( - self.config.highway.maz_to_maz.excluded_links, + self.config.maz_to_maz.excluded_links, maz_access_mode.id, modes, exclude_links_map, ) - for assign_class in self.config.highway.classes: + for assign_class in self.config.classes: self._apply_exclusions( assign_class.excluded_links, assign_class.mode_code, @@ -287,7 +370,9 @@ def _apply_exclusions( def _calc_link_skim_lengths(self, network: EmmeNetwork): """Calculate the length attributes used in the highway skims.""" - tollbooth_start_index = self.config.highway.tolls.tollbooth_start_index + valuetoll_start_tollbooth_code = ( + self.config.tolls.valuetoll_start_tollbooth_code + ) for link in network.links(): # distance in hov lanes / facilities if 2 <= link["@useclass"] <= 3: @@ -295,19 +380,157 @@ def _calc_link_skim_lengths(self, network: EmmeNetwork): else: link["@hov_length"] = 0 # distance on non-bridge toll facilities - if link["@tollbooth"] > 
tollbooth_start_index:
+        if link["@tollbooth"] > valuetoll_start_tollbooth_code:
                 link["@toll_length"] = link.length
             else:
                 link["@toll_length"] = 0

     def _calc_link_class_costs(self, network: EmmeNetwork):
         """Calculate the per-class link cost from the tolls and operating costs."""
-        for assign_class in self.config.highway.classes:
+        for assign_class in self.config.classes:
             cost_attr = f"@cost_{assign_class.name.lower()}"
             op_cost = assign_class["operating_cost_per_mile"]
             toll_factor = assign_class.get("toll_factor")
             if toll_factor is None:
                 toll_factor = 1.0
             for link in network.links():
-                toll_value = sum(link[toll_attr] for toll_attr in assign_class["toll"])
+                try:
+                    toll_value = sum(
+                        link[toll_attr] for toll_attr in assign_class["toll"]
+                    )
+                except:
+                    # toll attribute missing on this link; assume zero toll
+                    toll_value = 0
                 link[cost_attr] = link.length * op_cost + toll_value * toll_factor
+
+    def _calc_interchange_distance(self, network: EmmeNetwork):
+        """Calculate upstream and downstream interchange distance for highway reliability.
+
+        First labels the interchange nodes as nodes joining a freeway with a
+        freeway-to-freeway ramp, then finds the distance from each freeway link
+        to the closest interchange node upstream and downstream.
+        """
+        # input interchange nodes file
+        # This is a file inherited from https://app.box.com/folder/148342877307, as implemented in the tm2.1
+        interchange_nodes_file = self.get_abs_path(self.config.interchange_nodes_file)
+        interchange_nodes_df = pd.read_csv(interchange_nodes_file)
+        interchange_nodes_df = interchange_nodes_df[interchange_nodes_df.intx > 0]
+        interchange_points = interchange_nodes_df["N"].tolist()
+        network.create_attribute("NODE", "is_interchange")
+        for node in network.nodes():
+            if node["#node_id"] in interchange_points:
+                node.is_interchange = True
+            node["@interchange"] = node.is_interchange
+
+        mode_c = network.mode("c")
+        for link in network.links():
+            if link["@ft"] in [1, 2] and mode_c in link.modes:
+                link["@intdist_down"] = PrepareNetwork.interchange_distance(
+                    link, "DOWNSTREAM"
+                )
+                link["@intdist_up"] = PrepareNetwork.interchange_distance(
+                    link, "UPSTREAM"
+                )
+
+        network.delete_attribute("NODE", "is_interchange")
+
+    @staticmethod
+    def interchange_distance(orig_link, direction):
+        """Distance along the freeway from orig_link to the nearest interchange node."""
+        visited = set([])
+        visited_add = visited.add
+        back_links = {}
+        heap = []
+        if direction == "DOWNSTREAM":
+            get_links = lambda l: l.j_node.outgoing_links()
+            check_far_node = lambda l: l.j_node.is_interchange
+        elif direction == "UPSTREAM":
+            get_links = lambda l: l.i_node.incoming_links()
+            check_far_node = lambda l: l.i_node.is_interchange
+        # Shortest path search for nearest interchange node along freeway
+        for link in get_links(orig_link):
+            _heapq.heappush(heap, (link["length"], link["#link_id"], link))
+        interchange_found = False
+
+        # Check first node
+        if check_far_node(orig_link):
+            interchange_found = True
+            link_cost = 0.0
+
+        try:
+            while not interchange_found:
+                link_cost, link_id, link = _heapq.heappop(heap)
+                if link in visited:
+                    continue
+                visited_add(link)
+                if check_far_node(link):
+                    interchange_found = True
+                    break
+                get_links_return = get_links(link)
+                for next_link in get_links_return:
+                    if next_link in visited:
+                        continue
+                    next_cost = link_cost + next_link["length"]
+                    _heapq.heappush(heap, (next_cost, next_link["#link_id"], next_link))
+        except TypeError:
+            # TypeError if Link objects are compared in the tuples
+            # (case where the path costs are the same)
+            raise Exception("Path costs are the same, cannot compare Link objects")
+        except IndexError:
+            # IndexError if heap is empty:
+            # case where start / end of highway, dist = 99
+            return 99
+        return orig_link["length"] / 2.0 + link_cost
+
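+    # Note on interchange_distance above: heap entries are ordered tuples of
+    # (cost, #link_id, link) so that cost ties are broken by the scalar
+    # #link_id before Python ever tries to compare two Link objects directly
+    # (the TypeError case handled in the search loop).
+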
+    def _calc_link_static_reliability(self, network: EmmeNetwork):
+        """Calculate link static reliability for highway reliability.
+
+        Consists of a lane factor, interchange-distance factors, and speed
+        factors, differentiated by freeway, arterial, and other facilities.
+        """
+        # Static reliability parameters
+        # freeway coefficients
+        freeway_rel = {
+            "intercept": 0.1078,
+            "speed>70": 0.01393,
+            "upstream": 0.011,
+            "downstream": 0.0005445,
+        }
+        # arterial/ramp/other coefficients
+        road_rel = {
+            "intercept": 0.0546552,
+            "lanes": {1: 0.0, 2: 0.0103589, 3: 0.0361211, 4: 0.0446958, 5: 0.0},
+            "speed": {
+                "<35": 0,
+                35: 0.0075674,
+                40: 0.0091012,
+                45: 0.0080996,
+                50: -0.0022938,
+                ">50": -0.0046211,
+            },
+        }
+        for link in network.links():
+            # if freeway apply freeway parameters to this link
+            if (link["@ft"] in [1, 2]) and (link["@lanes"] > 0):
+                high_speed_factor = (
+                    freeway_rel["speed>70"] if link["@free_flow_speed"] >= 70 else 0
+                )
+                upstream_factor = freeway_rel["upstream"] * 1 / link["@intdist_up"]
+                downstream_factor = (
+                    freeway_rel["downstream"] * 1 / link["@intdist_down"]
+                )
+                link["@static_rel"] = (
+                    freeway_rel["intercept"]
+                    + high_speed_factor
+                    + upstream_factor
+                    + downstream_factor
+                )
+            # arterial/ramp/other apply road parameters
+            elif (link["@ft"] < 8) and (link["@lanes"] > 0):
+                lane_factor = road_rel["lanes"].get(link["@lanes"], 0)
+                speed_bin = link["@free_flow_speed"]
+                if speed_bin < 35:
+                    speed_bin = "<35"
+                elif speed_bin > 50:
+                    speed_bin = ">50"
+                speed_factor = road_rel["speed"][speed_bin]
+                link["@static_rel"] = road_rel["intercept"] + lane_factor + speed_factor
+            else:
+                link["@static_rel"] = 0
diff --git a/tm2py/components/network/skims.py b/tm2py/components/network/skims.py
new file mode 100644
index 00000000..5a371460
--- /dev/null
+++ b/tm2py/components/network/skims.py
@@ -0,0 +1,168 @@
+"""General skim-related tools."""
+
+import itertools
+import os
+from typing import TYPE_CHECKING, Collection, Mapping, Union
+
+import numpy as np
+from numpy import array as NumpyArray
+
+from tm2py.emme.matrix import OMXManager
+
+if TYPE_CHECKING:
+    from tm2py.controller import RunController
+
+
+def get_summed_skims(
+    controller: "RunController",
+    mode: Union[str, Collection[str]],
+    veh_group_name: str,
+    time_period: str,
+    property: Union[str, Collection[str]],
+    omx_manager: OMXManager = None,
+) -> NumpyArray:
+    """Sum skim matrices for a list of properties and modes for a time period.
+
+    Args:
+        controller (RunController): reference to the run controller.
+        mode (Union[str,Collection[str]]): skim mode or modes to sum.
+        veh_group_name (str): vehicle group name used in skim property names.
+        time_period (str): time period name.
+        property (Union[str,Collection[str]]): skim property or properties to sum.
+        omx_manager (OMXManager, optional): open OMX manager to read from. Defaults to None.
+
+    Returns:
+        NumpyArray: Numpy matrix of sums of skims from list.
+    """
+
+    if isinstance(mode, str):
+        mode = [mode]
+    if isinstance(property, str):
+        property = [property]
+
+    _mode_prop = itertools.product(mode, property)
+
+    _mx_list = [
+        get_omx_skim_as_numpy(
+            controller, mode, veh_group_name, time_period, prop, omx_manager
+        )
+        for mode, prop in _mode_prop
+    ]
+
+    if len(_mx_list) == 1:
+        return _mx_list[0]
+
+    # element-wise sum over all matrices; np.add(*_mx_list) only handles two
+    # operands (a third positional argument is treated as the "out" array)
+    return np.sum(_mx_list, axis=0)
+
+
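+# Example usage (the "da" class and the toll skim names are illustrative;
+# actual names come from config.highway.classes):
+#     tolls = get_summed_skims(
+#         controller, mode="da", veh_group_name="da", time_period="AM",
+#         property=["bridgetoll", "valuetoll"],
+#     )
+
+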
+def get_omx_skim_as_numpy(
+    controller: "RunController",
+    skim_mode: str,
+    veh_group_name: str,
+    time_period: str,
+    property: str = "time",
+    omx_manager: OMXManager = None,
+) -> NumpyArray:
+    """Get OMX skim by time and mode from folder and return a zone-to-zone NumpyArray.
+
+    TODO make this independent of a model run (controller) so can be a function to use
+    in analysis.
+
+    Args:
+        controller: tm2py controller, for accessing config.
+        skim_mode: mode to get.
+        veh_group_name: vehicle group name used in skim property names.
+        time_period: time period to get.
+        property: property to get. Defaults to "time".
+        omx_manager: open OMX manager to read from. Defaults to None.
+    """
+
+    if time_period.upper() not in controller.time_period_names:
+        raise ValueError(
+            f"Skim time period {time_period.upper()} must be a subset of config time periods: {controller.time_period_names}"
+        )
+
+    # TODO need to more dutifully map skim modes to network modes
+    _hwy_classes = {c.name: c for c in controller.config.highway.classes}
+    if skim_mode in _hwy_classes.keys():
+        _config = controller.config.highway
+        _mode_config = _hwy_classes[skim_mode]
+
+    else:
+        raise NotImplementedError("Haven't implemented non highway skim access")
+
+    if property not in _mode_config["skims"]:
+        property = property + "_" + veh_group_name
+        if property not in _mode_config["skims"]:
+            raise ValueError(
+                f"Property {property} not an available skim in mode {skim_mode}.\
+                Available skims are: {_mode_config['skims']}"
+            )
+
+    _matrix_name = _config.output_skim_matrixname_tmpl.format(
+        time_period=time_period.upper(),
+        mode=skim_mode,
+        property=property,
+    )
+
+    # TODO figure out how to get upper() and lower() into actual format string
+    if omx_manager is None:
+
+        _filename = _config.output_skim_filename_tmpl.format(
+            time_period=time_period.lower()
+        )
+        _filepath = controller.run_dir / _config.output_skim_path / _filename
+        # the context manager closes the file; no explicit close() needed
+        with OMXManager(_filepath, "r") as _f:
+            omx_data = _f.read(_matrix_name)
+        return omx_data
+    else:
+        _filename = _config.output_skim_filename_tmpl.format(
+            time_period=time_period.lower()
+        )
+        if os.path.basename(omx_manager._file_path) != _filename:
+            omx_manager.close()
+            omx_manager._file_path = (
+                controller.run_dir / _config.output_skim_path / _filename
+            )
+            omx_manager.open()
+        omx_data = omx_manager.read(_matrix_name)
+        omx_manager.close()
+        return omx_data
+
+
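+# Example: with the default blend below this returns (1/3)*AM + (2/3)*MD skim
+# values, e.g. get_blended_skim(controller, mode="trk") for blended truck time
+# ("trk" per the note in the docstring).
+
+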
+def get_blended_skim(
+    controller: "RunController",
+    mode: str,
+    property: str = "time",
+    blend: Mapping[str, float] = {"AM": 0.3333333333, "MD": 0.6666666667},
+) -> NumpyArray:
+    r"""Blend skim values for distribution calculations.
+
+    Note: Cube outputs skims\COM_HWYSKIMAM_taz.tpp, r'skims\COM_HWYSKIMMD_taz.tpp'
+    are in the highway_skims_{period}.omx files in Emme version
+    with updated matrix names, {period}_trk_time, {period}_lrgtrk_time.
+    Also, there will no longer be separate very small, small and medium
+    truck times, as they are assigned together as the same class.
+    There is only the trk_time.
+
+    Args:
+        controller: Emme controller.
+        mode: Mode to blend.
+        property: Property to blend. Defaults to "time".
+        blend: Blend factors, a dictionary of time-period:blend-multiplier where:
+            - sum of all blend multipliers should equal 1.
+              Defaults to `{"AM":1./3, "MD":2./3}`
+            - keys should be subset of _config.time_periods.names
+    """
+
+    if sum(blend.values()) != 1.0:
+        raise ValueError(f"Blend values must sum to 1.0: {blend}")
+
+    _scaled_times = []
+    for _tp, _multiplier in blend.items():
+        _scaled_times.append(
+            get_omx_skim_as_numpy(controller, mode, "", _tp, property) * _multiplier
+        )
+
+    _blended_time = sum(_scaled_times)
+    return _blended_time
+
+
+## TODO move availability mask from toll choice to here
diff --git a/tm2py/components/network/transit/__init__.py b/tm2py/components/network/transit/__init__.py
index 5b4136b3..af3a5fd0 100644
--- a/tm2py/components/network/transit/__init__.py
+++ b/tm2py/components/network/transit/__init__.py
@@ -1,3 +1,3 @@
-"""Transit assignment and skim module"""
+"""Transit assignment and skim module."""
 from .transit_assign import TransitAssignment
 from .transit_skim import TransitSkim
diff --git a/tm2py/components/network/transit/transit_assign.py b/tm2py/components/network/transit/transit_assign.py
index 5039df8f..836c5193 100644
--- a/tm2py/components/network/transit/transit_assign.py
+++ b/tm2py/components/network/transit/transit_assign.py
@@ -1,9 +1,2020 @@
-"""Transit assignment module"""
+"""Transit assignment module."""

-from ...component import Component
+from __future__ import annotations

-# from ....controller import RunController
+import inspect
+import json as _json
+import os
+import textwrap
+import copy
+import pandas as pd
+from collections import defaultdict as _defaultdict
+from functools import partial
+from typing import TYPE_CHECKING, Dict, List, Set, Tuple, Union
+
+from tm2py import tools
+from tm2py.components.component import Component
+from tm2py.components.demand.prepare_demand import PrepareTransitDemand
+from tm2py.emme.manager import EmmeNetwork, EmmeScenario
+from tm2py.logger import LogStartEnd
+from tm2py.components.network.transit.transit_network import PrepareTransitNetwork
+
+if TYPE_CHECKING:
+    from tm2py.config import (
+        CcrWeightsConfig,
+        CongestedWeightsConfig,
+        TransitClassConfig,
+        TransitConfig,
+        TransitModeConfig,
+    )
+    from tm2py.controller import RunController
+
+
+# QUESTION - can we put these functions in the TransitAssignment class? I pulled them out in case Emme was going to be picky about them being intertwined
+
+
+def time_period_capacity(
+    vehicle_capacity: float, headway: float, time_period_duration: float
+) -> float:
+    """Capacity of a transit line over the whole time period.
+
+    Args:
+        vehicle_capacity (float): capacity per vehicle (or train set; for vehicles
+            with multiple cars, i.e. trainsets, the capacity of all of them that
+            are traveling together).
+        headway (float): minutes between vehicles (or train sets).
+        time_period_duration (float): duration of the time period in hours
+            (consistent with the formula below, which converts it to minutes).
+
+    Returns:
+        float: capacity for the whole time period
+    """
+    return vehicle_capacity * time_period_duration * 60 / headway
+
+
+def func_returns_crowded_segment_cost(time_period_duration, weights: CcrWeightsConfig):
+    """Return the source of the calc_segment_cost function for the Emme assignment,
+    with the parameters pre-formatted in (acts like partial, which Emme does not accept).
+    """
+
+    def calc_segment_cost(transit_volume: float, capacity, segment) -> float:
+        """Calculates crowding factor for a segment.
+
+        Toronto implementation limited factor between 1.0 and 10.0.
+ For use with Emme Capacitated assignment normalize by subtracting 1 + + Args: + time_period_duration(float): time period duration in minutes + weights (_type_): transit capacity weights + segment_pax (float): transit passengers for the segment for the time period + segment: emme line segment + + Returns: + float: crowding factor for a segment + """ + + from tm2py.config import ( + CcrWeightsConfig, + EawtWeightsConfig, + TransitClassConfig, + TransitConfig, + TransitModeConfig, + ) + + if transit_volume == 0: + return 0.0 + + line = segment.line + # segment_capacity = time_period_capacity( + # line.vehicle.total_capacity, line.headway, time_period_duration + # ) + # seated_capacity = time_period_capacity( + # line.vehicle.seated_capacity, line.headway, time_period_duration + # ) + + seated_capacity = ( + line.vehicle.seated_capacity * {time_period_duration} * 60 / line.headway + ) + + seated_pax = min(transit_volume, seated_capacity) + standing_pax = max(transit_volume - seated_pax, 0) + + seated_cost = {weights}.min_seat + ({weights}.max_seat - {weights}.min_seat) * ( + transit_volume / capacity + ) ** {weights}.power_seat + + standing_cost = {weights}.min_stand + ( + {weights}.max_stand - {weights}.min_stand + ) * (transit_volume / capacity) ** {weights}.power_stand + + crowded_cost = (seated_cost * seated_pax + standing_cost * standing_pax) / ( + transit_volume + 0.01 + ) + + normalized_crowded_cost = max(crowded_cost - 1, 0) + + return normalized_crowded_cost + + # return textwrap.dedent(inspect.getsource(calc_segment_cost)) + + return textwrap.dedent(inspect.getsource(calc_segment_cost)).format( + time_period_duration=time_period_duration, weights=weights + ) + + +def func_returns_segment_congestion(time_period_duration, scenario, weights: CongestedWeightsConfig, use_fares: bool = False): + """ + function that returns the calc_segment_cost function for emme assignment, with partial preloaded parameters + acts like partial as emme does not take partial + """ + if use_fares: + values = scenario.get_attribute_values("TRANSIT_LINE", ["#src_mode"]) + scenario.set_attribute_values("TRANSIT_LINE", ["#src_mode"], values) + + def calc_segment_cost(transit_volume: float, capacity, segment) -> float: + """Calculates crowding factor for a segment. + + Toronto implementation limited factor between 1.0 and 10.0. 
+ For use with Emme Capacitated assignment normalize by subtracting 1 + + Args: + time_period_duration(float): time period duration in minutes + weights (_type_): transit capacity weights + segment: emme line segment + + Returns: + float: crowding factor for a segment + """ + + from tm2py.config import ( + CongestedWeightsConfig, + TransitClassConfig, + TransitConfig, + TransitModeConfig, + ) + + if transit_volume <= 0: + return 0.0 + + line = segment.line + + if {use_fares}: + mode_char = line["#src_mode"] + else: + mode_char = line.mode.id + + if mode_char in ["p"]: + congestion = 0.25 * ((transit_volume / capacity) ** 8) + else: + seated_capacity = ( + line.vehicle.seated_capacity * {time_period_duration} * 60 / line.headway + ) + + seated_pax = min(transit_volume, seated_capacity) + standing_pax = max(transit_volume - seated_pax, 0) + + seated_cost = {weights}.min_seat + ({weights}.max_seat - {weights}.min_seat) * ( + transit_volume / capacity + ) ** {weights}.power_seat + + standing_cost = {weights}.min_stand + ( + {weights}.max_stand - {weights}.min_stand + ) * (transit_volume / capacity) ** {weights}.power_stand + + crowded_cost = (seated_cost * seated_pax + standing_cost * standing_pax) / ( + transit_volume + ) + + congestion = max(crowded_cost, 1) - 1.0 + + return congestion + + return textwrap.dedent(inspect.getsource(calc_segment_cost)).format( + time_period_duration=time_period_duration, weights=weights, use_fares = use_fares + ) + + +# def calc_segment_cost_curry(func, time_period_duration: float, weights): +# """ +# curry function for calc_segment_cost +# """ +# return (lambda y: func(time_period_duration, weights, y)) + +# def calc_segment_cost( +# time_period_duration: float, weights, transit_volume: float, segment +# ) -> float: +# """Calculates crowding factor for a segment. + +# Toronto implementation limited factor between 1.0 and 10.0. +# For use with Emme Capacitated assignment normalize by subtracting 1 + +# Args: +# time_period_duration(float): time period duration in minutes +# weights (_type_): transit capacity weights +# segment_pax (float): transit passengers for the segment for the time period +# segment: emme line segment + +# Returns: +# float: crowding factor for a segment +# """ + +# if transit_volume == 0: +# return 0.0 + +# line = segment.line +# segment_capacity = time_period_capacity( +# line.vehicle.total_capacity, line.headway, time_period_duration +# ) +# seated_capacity = time_period_capacity( +# line.vehicle.seated_capacity, line.headway, time_period_duration +# ) + +# seated_pax = min(transit_volume, seated_capacity) +# standing_pax = max(transit_volume - seated_pax, 0) + +# seated_cost = ( +# weights.min_seat +# + (weights.max_seat - weights.min_seat) +# * (transit_volume / segment_capacity) ** weights.power_seat +# ) + +# standing_cost = ( +# weights.min_stand +# + (weights.max_stand - weights.min_stand) +# * (transit_volume / segment_capacity) ** weights.power_stand +# ) + +# crowded_cost = (seated_cost * seated_pax + standing_cost * standing_pax) / ( +# transit_volume + 0.01 +# ) + +# normalized_crowded_cost = max(crowded_cost - 1, 0) + +# return normalized_crowded_cost + + +def calc_total_offs(line) -> float: + """Calculate total alightings for a line. 
+
+    Args:
+        line: Emme transit line object
+
+    Returns:
+        float: total alightings for the line, or 9999 when effectively zero
+    """
+    # NOTE This was done previously using:
+    #   total_offs += prev_seg.transit_volume - seg.transit_volume + seg.transit_boardings
+    # but offs should equal ons for a whole line, so this seems simpler
+    offs = [seg.transit_boardings for seg in line.segments(True)]
+    total_offs = sum(offs)
+    # return a large sentinel when there are effectively no offs, to guard
+    # against divide-by-zero errors in callers
+    return total_offs if total_offs >= 0.001 else 9999
+
+
+def calc_offs_thru_segment(segment) -> float:
+    """Calculate cumulative alightings on a line up to and including this segment.
+
+    Args:
+        segment: Emme transit segment object
+
+    Returns:
+        float: total alightings through this segment
+    """
+    # TODO (Sijia): check that the slice should be [: segment.number + 1];
+    # unsure whether segment.number is 0-indexed or 1-indexed in Emme.
+    segments_thru_this_segment = [seg for seg in iter(segment.line.segments(True))][
+        : segment.number + 1
+    ]
+    offs_thru_this_seg = [
+        prev_seg.transit_volume - this_seg.transit_volume + this_seg.transit_boardings
+        for prev_seg, this_seg in zip(
+            segments_thru_this_segment[:-1], segments_thru_this_segment[1:]
+        )
+    ]
+    total_offs_thru_this_seg = sum(offs_thru_this_seg)
+    return total_offs_thru_this_seg
+
+
+def calc_extra_wait_time(
+    segment,
+    segment_capacity: float,
+    eawt_weights,
+    mode_config: dict,
+    use_fares: bool = False,
+):
+    """Calculate extra added wait time (EAWT) for a segment.
+
+    # TODO document fully.
+
+    Args:
+        segment: Emme transit segment object.
+        segment_capacity (float): segment capacity for the time period
+        eawt_weights: extra added wait time weights
+        mode_config: mapping from mode character to mode config
+        use_fares (bool, optional): if True, look up the EAWT factor by the
+            line's original source mode (#src_mode). Defaults to False.
+
+    Returns:
+        float: extra added wait time, scaled by the mode's eawt_factor
+    """
+    _transit_volume = segment.transit_volume
+    _headway = segment.line.headway if segment.line.headway >= 0.1 else 9999
+    _total_offs = calc_total_offs(segment.line)
+    _offs_thru_segment = calc_offs_thru_segment(segment)
+
+    # TODO Document and add params to config. The source of these weights is unknown.
+    eawt = (
+        eawt_weights.constant
+        + eawt_weights.weight_inverse_headway * (1 / _headway)
+        + eawt_weights.vcr * (_transit_volume / segment_capacity)
+        + eawt_weights.exit_proportion * (_offs_thru_segment / _total_offs)
+    )
+
+    if use_fares:
+        eawt_factor = (
+            1
+            if segment.line["#src_mode"] == ""
+            else mode_config[segment.line["#src_mode"]]["eawt_factor"]
+        )
+    else:
+        eawt_factor = (
+            1
+            if segment.line.mode.id == ""
+            else mode_config[segment.line.mode.id]["eawt_factor"]
+        )
+
+    return eawt * eawt_factor
+
+
+def calc_adjusted_headway(segment, segment_capacity: float) -> float:
+    """Adjust the segment's perceived headway based on remaining capacity.
+
+    TODO: add documentation about the source and theory behind this.
+
+    Args:
+        segment: Emme transit segment object
+        segment_capacity (float): segment capacity for the time period
+
+    Returns:
+        float: adjusted headway
+    """
+    # TODO add to params
+    max_hdwy_growth = 1.5
+    max_headway = 999.98
+    # QUESTION FOR INRO: what is the difference between segment["@phdwy"] and line.headway?
+    # is one the perceived headway?
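+    # Sketch of the adjustment below: the previously perceived headway
+    # (@phdwy) is scaled by the ratio of boardings to remaining capacity,
+    # with growth capped at max_hdwy_growth; the result is bounded below by
+    # the line's scheduled headway and above by max_headway.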
+    _transit_volume = segment.transit_volume
+    _transit_boardings = segment.transit_boardings
+    _previous_headway = segment["@phdwy"]
+    _current_headway = segment.line.headway
+    _available_capacity = max(
+        segment_capacity - _transit_volume + _transit_boardings, 0
+    )
+
+    adjusted_headway = min(
+        max_headway,
+        _previous_headway
+        * min((_transit_boardings + 1) / (_available_capacity + 1), max_hdwy_growth),
+    )
+    adjusted_headway = max(_current_headway, adjusted_headway)
+
+    return adjusted_headway
+
+
+def func_returns_calc_updated_perceived_headway(
+    time_period_duration, eawt_weights, mode_config, use_fares
+):
+    """Return the source of the calc_headway function for Emme assignment.
+
+    The parameters are pre-substituted into the function source; this acts
+    like functools.partial, which Emme does not accept.
+    """
+
+    def calc_headway(transit_volume, transit_boardings, headway, capacity, segment):
+        """Calculate perceived headway, updated by capacity and extra added wait time.
+
+        # TODO Document more fully.
+
+        Note: the eawt_weights, mode_config, and use_fares values are
+        substituted into this source via str.format() before it is handed
+        to Emme; the remaining arguments are supplied by Emme.
+
+        Args:
+            transit_volume: transit passengers for the segment (supplied by Emme)
+            transit_boardings: boardings on the segment (supplied by Emme)
+            headway: line headway (supplied by Emme)
+            capacity: segment capacity for the time period (supplied by Emme)
+            segment: Emme transit segment object
+
+        Returns:
+            float: adjusted perceived headway plus extra added wait time
+        """
+        # QUESTION FOR INRO: Kevin separately put segment.line.headway and headway as an arg.
+        # Would they be different? Why?
+        # TODO: Either can we label the headways so it is clear what is diff about them or just use single value?
+
+        # imported inside the function so the rendered source is
+        # self-contained when it is executed by Emme
+        from tm2py.config import (
+            CcrWeightsConfig,
+            EawtWeightsConfig,
+            TransitClassConfig,
+            TransitConfig,
+            TransitModeConfig,
+        )
+
+        _segment_capacity = capacity
+
+        _extra_added_wait_time = calc_extra_wait_time(
+            segment,
+            _segment_capacity,
+            {eawt_weights},
+            {mode_config},
+            {use_fares},
+        )
+
+        _adjusted_headway = calc_adjusted_headway(
+            segment,
+            _segment_capacity,
+        )
+
+        return _adjusted_headway + _extra_added_wait_time
+
+    return textwrap.dedent(inspect.getsource(calc_headway)).format(
+        time_period_duration=time_period_duration,
+        eawt_weights=eawt_weights,
+        mode_config=mode_config,
+        use_fares=use_fares,
+    )
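+
+
+# NOTE: Emme's custom congestion and headway functions must be supplied as
+# source text rather than callables, so the func_returns_* helpers above
+# render their inner function with inspect.getsource() and bake the
+# parameters in via str.format(), emulating functools.partial.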
+
+
+EmmeTransitJourneyLevelSpec = List[
+    Dict[
+        str,
+        Union[
+            str, bool, List[Dict[str, Union[int, str]]], Dict[str, Union[float, str]]
+        ],
+    ]
+]
+EmmeTransitSpec = Dict[
+    str,
+    Union[
+        str,
+        Dict[str, Union[str, float, bool, Dict[str, Union[str, float]]]],
+        List[str],
+        EmmeTransitJourneyLevelSpec,
+        None,
+    ],
+]
 
 
 class TransitAssignment(Component):
     """Run transit assignment."""
+
+    def __init__(self, controller: "RunController"):
+        """Constructor for TransitAssignment.
+
+        Args:
+            controller: RunController object.
+        """
+        super().__init__(controller)
+        self.config = self.controller.config.transit
+        self.sub_components = {
+            "prepare transit demand": PrepareTransitDemand(controller),
+        }
+        self.transit_network = PrepareTransitNetwork(controller)
+        self._demand_matrix = None  # FIXME
+        self._num_processors = self.controller.emme_manager.num_processors
+        self._time_period = None
+        self._scenario = None
+        self._transit_emmebank = None
+
+    def validate_inputs(self):
+        """Validate the inputs."""
+        # TODO
+
+    @property
+    def transit_emmebank(self):
+        if not self._transit_emmebank:
+            self._transit_emmebank = self.controller.emme_manager.transit_emmebank
+        return self._transit_emmebank
+
+    @LogStartEnd("Transit assignments")
+    def run(self):
+        """Run transit assignments."""
+
+        for time_period in self.time_period_names:
+            if self.controller.iteration == 0:
+                use_ccr = False
+                congested_transit_assignment = False
+                # update auto times
+                self.transit_network.update_auto_times(time_period)
+
+                # Run the extended transit assignment and skimming. When
+                # warm-starting, trim the demands based on the extended
+                # assignment and run the congested assignment; otherwise
+                # run with zero demand.
+                if self.controller.config.run.warmstart.warmstart:
+                    # import transit demands
+                    self.sub_components["prepare transit demand"].run()
+                    # uncongested (extended) assignment
+                    self.run_transit_assign(
+                        time_period, use_ccr, congested_transit_assignment
+                    )
+                    # TODO: run skim
+                    # TODO: trim_demand
+                    # create ccost attribute for skimming
+                    scenario = self.transit_emmebank.scenario(time_period)
+                    self._add_ccost_to_scenario(scenario)
+                    # congested_transit_assignment = self.config.congested_transit_assignment
+                    # # apply peaking factor
+                    # if self.config.congested.use_peaking_factor:
+                    #     path_boardings = self.get_abs_path(
+                    #         self.config.output_transit_boardings_path
+                    #     )
+                    #     ea_df_path = path_boardings.format(period='ea_pnr')
+                    #     if (time_period.lower() == 'am') and (os.path.isfile(ea_df_path)==False):
+                    #         raise Exception("run ea period first to account for the am peaking factor")
+                    #     if (time_period.lower() == 'am') and (os.path.isfile(ea_df_path)==True):
+                    #         ea_df = pd.read_csv(ea_df_path)
+                    #         self._apply_peaking_factor(time_period, ea_df=ea_df)
+                    #     if (time_period.lower() == 'pm'):
+                    #         self._apply_peaking_factor(time_period)
+                    # self.run_transit_assign(time_period, use_ccr, congested_transit_assignment)
+                    # if self.config.congested.use_peaking_factor and (time_period.lower() == 'ea'):
+                    #     self._apply_peaking_factor(time_period)
+
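+                    # NOTE: the commented block above is a draft of the
+                    # warm-start congested assignment with peaking factors,
+                    # kept disabled (presumably pending testing).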
+                else:
+                    # accessing the property should create the zero demand
+                    # matrix if it does not exist. TODO: needs further testing.
+                    self.transit_emmebank.zero_matrix
+
+            else:  # iteration >= 1
+                use_ccr = self.config.use_ccr
+                if time_period in ["EA", "EV", "MD"]:
+                    congested_transit_assignment = False
+                else:
+                    congested_transit_assignment = (
+                        self.config.congested_transit_assignment
+                    )
+                # update auto times
+                self.transit_network.update_auto_times(time_period)
+                # import transit demands
+                self.sub_components["prepare transit demand"].run()
+
+                # if (self.config.congested.trim_demand_before_congested_transit_assignment
+                #         and congested_transit_assignment):
+                #     use_ccr = False
+                #     congested_transit_assignment = False
+                #     self.run_transit_assign(time_period, use_ccr, congested_transit_assignment)
+                #     # TODO: run skim
+                #     # TODO: trim_demand
+
+                self.run_transit_assign(
+                    time_period, use_ccr, congested_transit_assignment
+                )
+
+            # output summaries
+            if self.config.output_stop_usage_path is not None:
+                network, class_stop_attrs = self._calc_connector_flows(time_period)
+                self._export_connector_flows(network, class_stop_attrs, time_period)
+            if self.config.output_transit_boardings_path is not None:
+                self._export_boardings_by_line(time_period)
+            if self.config.output_transit_segment_path is not None:
+                self._export_transit_segment(time_period)
+            if self.config.output_station_to_station_flow_path is not None:
+                self._export_boardings_by_station(time_period)
+            if self.config.output_transfer_at_station_path is not None:
+                self._export_transfer_at_stops(time_period)
+
+    @LogStartEnd("Transit assignments for a time period")
+    def run_transit_assign(
+        self, time_period: str, use_ccr: bool, congested_transit_assignment: bool
+    ):
+        """Dispatch to the CCR, congested, or extended transit assignment.
+
+        Args:
+            time_period: time period name abbreviation
+            use_ccr: if True, run the capacity-constrained (CCR) assignment
+            congested_transit_assignment: if True (and use_ccr is False),
+                run the congested assignment; otherwise run the extended one
+        """
+        if use_ccr:
+            self._run_ccr_assign(time_period)
+        elif congested_transit_assignment:
+            self._run_congested_assign(time_period)
+        else:
+            self._run_extended_assign(time_period)
+
+    def _apply_peaking_factor(self, time_period: str, ea_df=None):
+        """Apply peaking factors to line headways.
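+
+        Peak-of-the-peak demand is represented by scaling effective line
+        capacity. Because Emme computes line capacity as
+        60 * duration * vehicle.total_capacity / headway, the factor is
+        applied by adjusting line headways rather than capacities.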
+
+        Args:
+            time_period: time period name abbreviation
+            ea_df: early AM park-and-ride boardings by line (required for AM)
+        """
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        _network = _emme_scenario.get_network()
+        _duration = self.time_period_durations[time_period.lower()]
+
+        if time_period.lower() == "am":
+            for line in _network.transit_lines():
+                line["@orig_hdw"] = line.headway
+                line_name = line.id
+                line_veh = line.vehicle
+                line_hdw = line.headway
+                line_cap = 60 * _duration * line_veh.total_capacity / line_hdw
+                if line_name in ea_df["line_name_am"].to_list():
+                    ea_boardings = ea_df.loc[
+                        ea_df["line_name_am"] == line_name, "boardings"
+                    ].values[0]
+                else:
+                    ea_boardings = 0
+                # subtract EA boardings from the AM park-and-ride capacity
+                pnr_peaking_factor = (line_cap - ea_boardings) / line_cap
+                non_pnr_peaking_factor = self.config.congested.am_peaking_factor
+                # In the Emme transit assignment, capacity is computed for each
+                # transit line as 60 * duration * vehicle.total_capacity / headway,
+                # so instead of applying the peaking factor to the calculated
+                # capacity we can divide line.headway by the peaking factor.
+                # If EA parkers already exceed the AM parking capacity, set the
+                # headway to a very large number.
+                if pnr_peaking_factor > 0:
+                    pnr_line_hdw = line_hdw / pnr_peaking_factor
+                else:
+                    pnr_line_hdw = 999
+                non_pnr_line_hdw = line_hdw * non_pnr_peaking_factor
+                if ("pnr" in line_name) and ("egr" in line_name):
+                    continue
+                elif ("pnr" in line_name) and ("acc" in line_name):
+                    line.headway = pnr_line_hdw
+                else:
+                    line.headway = non_pnr_line_hdw
+
+        if time_period.lower() == "pm":
+            for line in _network.transit_lines():
+                line["@orig_hdw"] = line.headway
+                line_name = line.id
+                line_hdw = line.headway
+                non_pnr_peaking_factor = self.config.congested.pm_peaking_factor
+                non_pnr_line_hdw = line_hdw * non_pnr_peaking_factor
+                if "pnr" in line_name:
+                    continue
+                else:
+                    line.headway = non_pnr_line_hdw
+
+        if time_period.lower() == "ea":
+            line_names = []
+            boards = []
+            ea_pnr_df = pd.DataFrame()
+            for line in _network.transit_lines():
+                boardings = 0
+                for segment in line.segments(include_hidden=True):
+                    boardings += segment.transit_boardings
+                line_names.append(line.id)
+                boards.append(boardings)
+            ea_pnr_df["line_name"] = line_names
+            ea_pnr_df["boardings"] = boards
+            # EA boardings will be subtracted from the AM parking capacity
+            ea_pnr_df["line_name_am"] = ea_pnr_df["line_name"].str.replace("EA", "AM")
+            path_boardings = self.get_abs_path(
+                self.config.output_transit_boardings_path
+            )
+            ea_pnr_df.to_csv(path_boardings.format(period="ea_pnr"), index=False)
+
+        _update_attributes = {"TRANSIT_LINE": ["@orig_hdw", "headway"]}
+        self.controller.emme_manager.copy_attribute_values(
+            _network, _emme_scenario, _update_attributes
+        )
+
+    def _transit_classes(self, time_period) -> List[TransitAssignmentClass]:
+        """Build a TransitAssignmentClass object for each configured class.
+
+        Args:
+            time_period: time period name abbreviation
+        """
+        emme_manager = self.controller.emme_manager
+        if self.config.use_fares:
+            fare_modes = _defaultdict(lambda: set([]))
+            network = self.transit_emmebank.scenario(time_period).get_partial_network(
+                ["TRANSIT_LINE"], include_attributes=False
+            )
+            emme_manager.copy_attribute_values(
+                self.transit_emmebank.scenario(time_period),
+                network,
+                {"TRANSIT_LINE": ["#src_mode"]},
+            )
+            for line in network.transit_lines():
+                fare_modes[line["#src_mode"]].add(line.mode.id)
+        else:
+            fare_modes = None
+        spec_dir = os.path.join(
+            self.get_abs_path(
+                os.path.dirname(self.controller.config.emme.project_path)
+            ),
+            "Specifications",
+        )
+        transit_classes = []
+        for class_config in self.config.classes:
+            transit_classes.append(
+                TransitAssignmentClass(
+                    class_config,
+                    self.config,
+                    time_period,
+                    self.controller.iteration,
+                    self._num_processors,
+                    fare_modes,
+                    spec_dir,
+                )
+            )
+        return transit_classes
+
+    def _run_ccr_assign(self, time_period: str) -> None:
+        """Run capacity-constrained (CCR) transit assignment for a time period and update penalties.
+
+        Args:
+            time_period: time period name
+        """
+        _duration = self.time_period_durations[time_period.lower()]
+        _ccr_weights = self.config.ccr_weights
+        _eawt_weights = self.config.eawt_weights
+        _mode_config = {
+            mode_config.mode_id: mode_config for mode_config in self.config.modes
+        }
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        transit_classes = self._transit_classes(time_period)
+
+        assign_transit = self.controller.emme_manager.tool(
+            "inro.emme.transit_assignment.capacitated_transit_assignment"
+        )
+        _tclass_specs = [tclass.emme_transit_spec for tclass in transit_classes]
+        _tclass_names = [tclass.name for tclass in transit_classes]
+
+        _cost_func = {
+            "segment": {
+                "type": "CUSTOM",
+                "python_function": func_returns_crowded_segment_cost(
+                    _duration, _ccr_weights
+                ),
+                "congestion_attribute": "us3",
+                "orig_func": False,
+            },
+            "headway": {
+                "type": "CUSTOM",
+                "python_function": func_returns_calc_updated_perceived_headway(
+                    _duration,
+                    _eawt_weights,
+                    _mode_config,
+                    use_fares=self.config.use_fares,
+                )
+                + "\n"
+                + textwrap.dedent(inspect.getsource(calc_extra_wait_time))
+                + "\n"
+                + textwrap.dedent(inspect.getsource(calc_adjusted_headway))
+                + "\n"
+                + textwrap.dedent(inspect.getsource(calc_total_offs))
+                + "\n"
+                + textwrap.dedent(inspect.getsource(calc_offs_thru_segment)),
+            },
+            "assignment_period": _duration,
+        }
+
+        _stop_criteria = {
+            "max_iterations": self.config.ccr_stop_criteria.max_iterations,
+            "relative_difference": self.config.ccr_stop_criteria.relative_difference,
+            "percent_segments_over_capacity": self.config.ccr_stop_criteria.percent_segments_over_capacity,
+        }
+        assign_transit(
+            _tclass_specs,
+            congestion_function=_cost_func,
+            stopping_criteria=_stop_criteria,
+            class_names=_tclass_names,
+            scenario=_emme_scenario,
+            log_worksheets=False,
+        )
+
+        # QUESTION: why do we need to do this between iterations AND ALSO give
+        # it to the Emme cost function? Does Emme not use it?
+        self._calc_segment_ccr_penalties(time_period)
+
+    def _run_congested_assign(self, time_period: str) -> None:
+        """Run congested transit assignment for a time period.
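+
+        Uses Emme's congested_transit_assignment tool with the custom cost
+        function rendered by func_returns_segment_congestion, iterating until
+        the normalized/relative gap criteria in config.congested are met.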
+
+        Args:
+            time_period: time period name
+        """
+        _duration = self.time_period_durations[time_period.lower()]
+        _congested_weights = self.config.congested_weights
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        transit_classes = self._transit_classes(time_period)
+
+        assign_transit = self.controller.emme_manager.tool(
+            "inro.emme.transit_assignment.congested_transit_assignment"
+        )
+        _tclass_specs = [tclass.emme_transit_spec for tclass in transit_classes]
+        _tclass_names = [tclass.name for tclass in transit_classes]
+
+        _cost_func = {
+            "type": "CUSTOM",
+            "python_function": func_returns_segment_congestion(
+                _duration,
+                _emme_scenario,
+                _congested_weights,
+                use_fares=self.config.use_fares,
+            ),
+            "congestion_attribute": "us3",
+            "orig_func": False,
+            "assignment_period": _duration,
+        }
+
+        _stop_criteria = {
+            "max_iterations": self.congested_transit_assn_max_iteration[time_period.lower()],
+            "normalized_gap": self.config.congested.normalized_gap,
+            "relative_gap": self.config.congested.relative_gap,
+        }
+        assign_transit(
+            _tclass_specs,
+            congestion_function=_cost_func,
+            stopping_criteria=_stop_criteria,
+            class_names=_tclass_names,
+            scenario=_emme_scenario,
+            log_worksheets=False,
+        )
+
+    def _run_extended_assign(self, time_period: str) -> None:
+        """Run transit assignment without CCR.
+
+        Args:
+            time_period (str): time period name
+        """
+        assign_transit = self.controller.emme_manager.modeller.tool(
+            "inro.emme.transit_assignment.extended_transit_assignment"
+        )
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+
+        # Question for INRO: Why are we only adding subsequent volumes? Shouldn't
+        # it be assumed to be zero to begin with?
+        # Question for INRO: Can this function be distributed across machines?
+        # If so, how would that be structured?
+        add_volumes = False
+        for tclass in self._transit_classes(time_period):
+            assign_transit(
+                tclass.emme_transit_spec,
+                class_name=tclass.name,
+                add_volumes=add_volumes,
+                scenario=_emme_scenario,
+            )
+            add_volumes = True
+
+    def _get_network_with_boardings(
+        self, emme_scenario: "EmmeScenario"
+    ) -> "EmmeNetwork":
+        """Get network with transit boardings by line and segment.
+
+        Args:
+            emme_scenario: Emme scenario for the time period
+
+        Returns:
+            EmmeNetwork: with transit boardings by line and segment.
+        """
+        network = emme_scenario.get_partial_network(
+            ["TRANSIT_LINE", "TRANSIT_SEGMENT"], include_attributes=False
+        )
+        _attributes = {
+            "TRANSIT_LINE": ["description", "#src_mode"],
+            "TRANSIT_SEGMENT": ["transit_boardings"],
+        }
+        _emme_manager = self.controller.emme_manager
+        _emme_manager.copy_attribute_values(emme_scenario, network, _attributes)
+        return network
+
+    def _export_boardings_by_line(self, time_period: str) -> None:
+        """Export total boardings by line to config.transit.output_transit_boardings_path.
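+
+        Writes one row per transit line with total boardings, hourly
+        capacity, mode, headway, and fare system to a CSV keyed by time
+        period.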
+
+        Args:
+            time_period (str): time period abbreviation
+        """
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        network = _emme_scenario.get_network()
+
+        output_transit_boardings_file = self.get_abs_path(
+            self.config.output_transit_boardings_path
+        )
+
+        os.makedirs(os.path.dirname(output_transit_boardings_file), exist_ok=True)
+
+        with open(
+            output_transit_boardings_file.format(period=time_period.lower()),
+            "w",
+            encoding="utf8",
+        ) as out_file:
+            out_file.write(",".join([
+                "line_name",
+                "description",
+                "total_boarding",
+                "total_hour_cap",
+                "tm2_mode",
+                "line_mode",
+                "headway",
+                "fare_system",
+            ]))
+            out_file.write("\n")
+            for line in network.transit_lines():
+                boardings = 0
+                capacity = line.vehicle.total_capacity
+                hdw = line.headway
+                line_hour_cap = 60 * capacity / hdw
+                if self.config.use_fares:
+                    mode = line["#src_mode"]
+                else:
+                    mode = line.mode
+                for segment in line.segments(include_hidden=True):
+                    boardings += segment.transit_boardings
+                out_file.write(",".join([str(x) for x in [
+                    line.id,
+                    line["#description"],
+                    boardings,
+                    line_hour_cap,
+                    line["#mode"],
+                    mode,
+                    line.headway,
+                    line["#faresystem"],
+                ]]))
+                out_file.write("\n")
+
+    def _calc_connector_flows(
+        self, time_period: str
+    ) -> Tuple["EmmeNetwork", Dict[str, str]]:
+        """Calculate boardings and alightings by assignment class.
+
+        Args:
+            time_period (str): time period abbreviation
+
+        Returns:
+            EmmeNetwork with aux_transit_volumes
+            transit class stop attributes: {<class name>: @aux_vol_...}
+        """
+        _emme_manager = self.controller.emme_manager
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        network_results = _emme_manager.tool(
+            "inro.emme.transit_assignment.extended.network_results"
+        )
+        create_extra = _emme_manager.tool(
+            "inro.emme.data.extra_attribute.create_extra_attribute"
+        )
+        tclass_stop_attrs = {}
+        for tclass in self.config.classes:
+            attr_name = f"@aux_vol_{tclass.name}".lower()  # maximum length 20 limit
+            create_extra("LINK", attr_name, overwrite=True, scenario=_emme_scenario)
+            spec = {
+                "type": "EXTENDED_TRANSIT_NETWORK_RESULTS",
+                "on_links": {"aux_transit_volumes": attr_name},
+            }
+            network_results(spec, class_name=tclass.name, scenario=_emme_scenario)
+            tclass_stop_attrs[tclass.name] = attr_name
+
+        # optimization: partial network to only load links and certain attributes
+        network = _emme_scenario.get_partial_network(["LINK"], include_attributes=True)
+        attributes = {
+            "LINK": tclass_stop_attrs.values(),
+            "NODE": ["@taz_id", "#node_id"],
+        }
+        _emme_manager.copy_attribute_values(_emme_scenario, network, attributes)
+        return network, tclass_stop_attrs
+
+    def _export_connector_flows(
+        self, network: EmmeNetwork, class_stop_attrs: Dict[str, str], time_period: str
+    ):
+        """Export boardings and alightings by assignment class, stop (connector), and TAZ.
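+
+        Boardings are read from each connector's auxiliary transit volume and
+        alightings from its reverse link, one row per class, TAZ, and stop.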
+
+        Args:
+            network: network with the assigned auxiliary transit volumes
+            class_stop_attrs: mapping from class name to the extra attribute
+                holding its connector volumes
+            time_period (str): time period abbreviation
+        """
+        path_tmplt = self.get_abs_path(self.config.output_stop_usage_path)
+        os.makedirs(os.path.dirname(path_tmplt), exist_ok=True)
+        with open(
+            path_tmplt.format(period=time_period.lower()), "w", encoding="utf8"
+        ) as out_file:
+            out_file.write(",".join(["mode", "taz", "stop", "boardings", "alightings"]))
+            out_file.write("\n")
+            for zone in network.centroids():
+                taz_id = int(zone["@taz_id"])
+                for link in zone.outgoing_links():
+                    stop_id = link.j_node["#node_id"]
+                    for name, attr_name in class_stop_attrs.items():
+                        alightings = (
+                            link.reverse_link[attr_name] if link.reverse_link else 0.0
+                        )
+                        out_file.write(
+                            f"{name}, {taz_id}, {stop_id}, {link[attr_name]}, {alightings}\n"
+                        )
+                for link in zone.incoming_links():
+                    if link.reverse_link:  # already exported
+                        continue
+                    stop_id = link.i_node["#node_id"]
+                    for name, attr_name in class_stop_attrs.items():
+                        out_file.write(
+                            f"{name}, {taz_id}, {stop_id}, 0.0, {link[attr_name]}\n"
+                        )
+
+    def _export_transit_segment(self, time_period: str):
+        """Export segment-level volumes and boardings by access/egress mode combination.
+
+        Args:
+            time_period (str): time period abbreviation
+        """
+        # add total boardings by access mode
+        _emme_manager = self.controller.emme_manager
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        network_results = _emme_manager.tool(
+            "inro.emme.transit_assignment.extended.network_results"
+        )
+        create_extra = _emme_manager.tool(
+            "inro.emme.data.extra_attribute.create_extra_attribute"
+        )
+        for tclass in self.config.classes:
+            initial_board_attr_name = f"@iboard_{tclass.name}".lower()
+            direct_xboard_attr_name = f"@dboard_{tclass.name}".lower()
+            auxiliary_xboard_attr_name = f"@aboard_{tclass.name}".lower()
+            create_extra(
+                "TRANSIT_SEGMENT",
+                initial_board_attr_name,
+                overwrite=True,
+                scenario=_emme_scenario,
+            )
+            create_extra(
+                "TRANSIT_SEGMENT",
+                direct_xboard_attr_name,
+                overwrite=True,
+                scenario=_emme_scenario,
+            )
+            create_extra(
+                "TRANSIT_SEGMENT",
+                auxiliary_xboard_attr_name,
+                overwrite=True,
+                scenario=_emme_scenario,
+            )
+            spec = {
+                "type": "EXTENDED_TRANSIT_NETWORK_RESULTS",
+                "on_segments": {
+                    "initial_boardings": initial_board_attr_name,
+                    "transfer_boardings_direct": direct_xboard_attr_name,
+                    "transfer_boardings_indirect": auxiliary_xboard_attr_name,
+                },
+            }
+            network_results(spec, class_name=tclass.name, scenario=_emme_scenario)
+
+        network = _emme_scenario.get_network()
+        path_boardings = self.get_abs_path(self.config.output_transit_segment_path)
+        with open(
+            path_boardings.format(period=time_period.lower()), "w", encoding="utf8"
+        ) as f:
+            f.write(",".join([
+                "line",
+                "stop_name",
+                "i_node",
+                "j_node",
+                "dwt",
+                "ttf",
+                "voltr",
+                "board",
+                "con_time",
+                "uncon_time",
+                "mode",
+                "src_mode",
+                "mdesc",
+                "hdw",
+                "orig_hdw",
+                "speed",
+                "vauteq",
+                "vcaps",
+                "vcapt",
+                "initial_board_ptw",
+                "initial_board_wtp",
+                "initial_board_ktw",
+                "initial_board_wtk",
+                "initial_board_wtw",
+                "direct_transfer_board_ptw",
+                "direct_transfer_board_wtp",
+                "direct_transfer_board_ktw",
+                "direct_transfer_board_wtk",
+                "direct_transfer_board_wtw",
+                "auxiliary_transfer_board_ptw",
+                "auxiliary_transfer_board_wtp",
+                "auxiliary_transfer_board_ktw",
+                "auxiliary_transfer_board_wtk",
+                "auxiliary_transfer_board_wtw",
+            ]))
+            f.write("\n")
+
+            for line in network.transit_lines():
+                for segment in line.segments(include_hidden=True):
+                    if self.config.use_fares:
+                        mode = segment.line["#src_mode"]
+                    else:
+                        mode = segment.line.mode
+                    if self.config.congested.use_peaking_factor and (
+                        time_period.lower() in ["am", "pm"]
+                    ):
+                        orig_headway = segment.line["@orig_hdw"]
+                    else:
+                        orig_headway = segment.line.headway
+                    f.write(",".join([str(x) for x in [
+                        segment.id,
+                        '"{0}"'.format(segment["#stop_name"]),
+                        segment.i_node,
+                        segment.j_node,
+                        segment.dwell_time,
+                        segment.transit_time_func,
+                        segment.transit_volume,
+                        segment.transit_boardings,
+                        segment.transit_time,
+                        segment["@trantime_seg"],
+                        segment.line.mode,
+                        mode,
+                        segment.line.mode.description,
+                        segment.line.headway,
+                        orig_headway,
+                        segment.line.speed,
+                        segment.line.vehicle.auto_equivalent,
+                        segment.line.vehicle.seated_capacity,
+                        segment.line.vehicle.total_capacity,
+                        segment["@iboard_pnr_trn_wlk"],
+                        segment["@iboard_wlk_trn_pnr"],
+                        segment["@iboard_knr_trn_wlk"],
+                        segment["@iboard_wlk_trn_knr"],
+                        segment["@iboard_wlk_trn_wlk"],
+                        segment["@dboard_pnr_trn_wlk"],
+                        segment["@dboard_wlk_trn_pnr"],
+                        segment["@dboard_knr_trn_wlk"],
+                        segment["@dboard_wlk_trn_knr"],
+                        segment["@dboard_wlk_trn_wlk"],
+                        segment["@aboard_pnr_trn_wlk"],
+                        segment["@aboard_wlk_trn_pnr"],
+                        segment["@aboard_knr_trn_wlk"],
+                        segment["@aboard_wlk_trn_knr"],
+                        segment["@aboard_wlk_trn_wlk"],
+                    ]]))
+                    f.write("\n")
+
+    def _export_boardings_by_station(self, time_period: str):
+        """Export station-to-station flows for selected operators (BART and Caltrain)."""
+        _emme_manager = self.controller.emme_manager
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        network = _emme_scenario.get_network()
+        sta2sta = _emme_manager.tool(
+            "inro.emme.transit_assignment.extended.station_to_station_analysis"
+        )
+        sta2sta_spec = {
+            "type": "EXTENDED_TRANSIT_STATION_TO_STATION_ANALYSIS",
+            "transit_line_selections": {
+                "first_boarding": "mode=h",
+                "last_alighting": "mode=h",
+            },
+            "analyzed_demand": None,
+        }
+
+        # map to used modes in apply fares case
+        fare_modes = _defaultdict(lambda: set([]))
+        for line in network.transit_lines():
+            if self.config.use_fares:
+                fare_modes[line["#src_mode"]].add(line.mode.id)
+            else:
+                fare_modes[line.mode.id].add(line.mode.id)
+
+        operator_dict = {
+            # operator: mode selection
+            "bart": "h",
+            "caltrain": "r",
+        }
+
+        for tclass in self.config.classes:
+            for op, cut in operator_dict.items():
+                demand_matrix = "mfTRN_%s_%s" % (tclass.name, time_period)
+                output_file_name = self.get_abs_path(
+                    self.config.output_station_to_station_flow_path
+                )
+
+                sta2sta_spec["transit_line_selections"]["first_boarding"] = (
+                    "mode=" + ",".join(list(fare_modes[cut]))
+                )
+                sta2sta_spec["transit_line_selections"]["last_alighting"] = (
+                    "mode=" + ",".join(list(fare_modes[cut]))
+                )
+                sta2sta_spec["analyzed_demand"] = demand_matrix
+
+                output_path = output_file_name.format(
+                    operator=op, tclass=tclass.name, period=time_period.lower()
+                )
+                sta2sta(
+                    specification=sta2sta_spec,
+                    output_file=output_path,
+                    scenario=_emme_scenario,
+                    append_to_output_file=False,
+                    class_name=tclass.name,
+                )
+
+    def _export_transfer_at_stops(self, time_period: str):
+        """Export transfers at the configured station nodes for each transit class."""
+        _emme_manager = self.controller.emme_manager
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        network = _emme_scenario.get_network()
+        transfers_at_stops = _emme_manager.tool(
+            "inro.emme.transit_assignment.extended.apps.transfers_at_stops"
+        )
+
+        stop_location = self.config.output_transfer_at_station_node_ids
+        stop_location_val_key = {val: key for key, val in stop_location.items()}
+
+        for node in network.nodes():
+            if stop_location_val_key.get(node["#node_id"]):
+                stop_location[stop_location_val_key[node["#node_id"]]] = node.id
+
+        for tclass in self.config.classes:
+            for stop_name, stop_id in stop_location.items():
+                demand_matrix = "mfTRN_%s_%s" % (tclass.name, time_period)
+                output_file_name = self.get_abs_path(
+                    self.config.output_transfer_at_station_path
+                )
+                output_path = output_file_name.format(
+                    tclass=tclass.name, stop=stop_name, period=time_period.lower()
+                )
+
+                transfers_at_stops(
+                    selection=f"i={stop_id}",
+                    export_path=output_path,
+                    scenario=_emme_scenario,
+                    class_name=tclass.name,
+                    analyzed_demand=demand_matrix,
+                )
+
+    def _add_ccr_vars_to_scenario(self, emme_scenario: "EmmeScenario") -> None:
+        """Add extra added wait time and capacity penalty attributes to the Emme scenario.
+
+        Args:
+            emme_scenario: EmmeScenario
+        """
+        create_extra = self.controller.emme_manager.tool(
+            "inro.emme.data.extra_attribute.create_extra_attribute"
+        )
+        create_extra(
+            "TRANSIT_SEGMENT",
+            "@eawt",
+            "extra added wait time",
+            overwrite=True,
+            scenario=emme_scenario,
+        )
+        create_extra(
+            "TRANSIT_SEGMENT",
+            "@capacity_penalty",
+            "capacity penalty at boarding",
+            overwrite=True,
+            scenario=emme_scenario,
+        )
+
+    def _add_ccost_to_scenario(self, emme_scenario: "EmmeScenario") -> None:
+        """Add the congested cost (@ccost) attribute to the Emme scenario.
+
+        Args:
+            emme_scenario: EmmeScenario
+        """
+        create_extra = self.controller.emme_manager.tool(
+            "inro.emme.data.extra_attribute.create_extra_attribute"
+        )
+        create_extra(
+            "TRANSIT_SEGMENT",
+            "@ccost",
+            "congested cost",
+            overwrite=True,
+            scenario=emme_scenario,
+        )
+
+    def _get_network_with_ccr_scenario_attributes(self, emme_scenario):
+        """Load a partial network with the attributes needed for CCR penalty calculations."""
+        self._add_ccr_vars_to_scenario(emme_scenario)
+
+        _attributes = {
+            "TRANSIT_SEGMENT": [
+                "@phdwy",
+                "transit_volume",
+                "transit_boardings",
+            ],
+            "TRANSIT_VEHICLE": ["seated_capacity", "total_capacity"],
+            "TRANSIT_LINE": ["headway"],
+        }
+        if self.config.use_fares:
+            _attributes["TRANSIT_LINE"].append("#src_mode")
+
+        # load network object from scenario (on disk) and copy some attributes
+        network = emme_scenario.get_partial_network(
+            ["TRANSIT_SEGMENT"], include_attributes=False
+        )
+        network.create_attribute("TRANSIT_LINE", "capacity")
+
+        self.controller.emme_manager.copy_attribute_values(
+            emme_scenario, network, _attributes
+        )
+        return network
+
+    def _calc_segment_ccr_penalties(self, time_period):
+        """Calculate extra added wait time (@eawt) and @capacity_penalty on the segments.
+
+        TODO: INRO please document.
+        """
+        _emme_scenario = self.transit_emmebank.scenario(time_period)
+        _network = self._get_network_with_ccr_scenario_attributes(_emme_scenario)
+
+        _eawt_weights = self.config.eawt_weights
+        _mode_config = {
+            mode_config.mode_id: mode_config for mode_config in self.config.modes
+        }
+
+        _duration = self.time_period_durations[time_period.lower()]
+        for line in _network.transit_lines():
+            line.capacity = time_period_capacity(
+                line.vehicle.total_capacity, line.headway, _duration
+            )
+
+        # QUESTION: document the origin of this parameter.
+        _hdwy_fraction = 0.5  # fixed in assignment spec
+        for segment in _network.transit_segments():
+            segment["@eawt"] = calc_extra_wait_time(
+                segment,
+                segment.line.capacity,
+                _eawt_weights,
+                _mode_config,
+                use_fares=self.config.use_fares,
+            )
+            segment["@capacity_penalty"] = (
+                max(segment["@phdwy"] - segment["@eawt"] - segment.line.headway, 0)
+                * _hdwy_fraction
+            )
+        # copy (save) results back from the network to the scenario (on disk)
+        _ccr_attributes = {"TRANSIT_SEGMENT": ["@eawt", "@capacity_penalty"]}
+        self.controller.emme_manager.copy_attribute_values(
+            _network, _emme_scenario, _ccr_attributes
+        )
+
+
+class TransitAssignmentClass:
+    """Transit assignment class; represents data from config and conversion to Emme specs.
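+
+    Instances are built by TransitAssignment._transit_classes, one per
+    configured class; the main entry point is the emme_transit_spec property.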
+ + Internal properties: + _name: the class name loaded from config (not to be changed) + _class_config: the transit class config (TransitClassConfig) + _transit_config: the root transit assignment config (TransitConfig) + _time_period: the time period name + _iteration: the current iteration + _num_processors: the number of processors to use, loaded from config + _fare_modes: the mapping from the generated fare mode ID to the original + source mode ID + _spec_dir: directory to find the generated journey levels tables from + the apply fares step + """ + + # disable too many instance attributes and arguments recommendations + # pylint: disable=R0902, R0913 + + def __init__( + self, + tclass_config: TransitClassConfig, + config: TransitConfig, + time_period: str, + iteration: int, + num_processors: int, + fare_modes: Dict[str, Set[str]], + spec_dir: str, + ): + """Assignment class constructor. + + Args: + tclass_config: the transit class config (TransitClassConfig) + config: the root transit assignment config (TransitConfig) + time_period: the time period name + iteration: the current iteration + num_processors: the number of processors to use, loaded from config + fare_modes: the mapping from the generated fare mode ID to the original + source mode ID + spec_dir: directory to find the generated journey levels tables from + the apply fares step + """ + self._name = tclass_config.name + self._class_config = tclass_config + self._config = config + self._time_period = time_period + self._iteration = iteration + self._num_processors = num_processors + self._fare_modes = fare_modes + self._spec_dir = spec_dir + + @property + def name(self) -> str: + """The class name.""" + return self._name + + @property + def emme_transit_spec(self) -> EmmeTransitSpec: + """Return Emme Extended transit assignment specification. + + Converted from input config (transit.classes, with some parameters from + transit table), see also Emme Help for + Extended transit assignment for specification details. 
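+
+        The spec covers modes, the demand matrix, waiting/boarding/in-vehicle
+        time perception factors, journey levels, and flow distribution
+        settings; fare perception factors are applied when use_fares is on.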
+
+        """
+        spec = {
+            "type": "EXTENDED_TRANSIT_ASSIGNMENT",
+            "modes": self._modes,
+            "demand": self._demand_matrix,
+            "waiting_time": {
+                "effective_headways": self._config.effective_headway_source,
+                "headway_fraction": "@hdw_fraction",
+                "perception_factor": self._config.initial_wait_perception_factor,
+                "spread_factor": 1.0,
+            },
+            "boarding_cost": {"global": {"penalty": 0, "perception_factor": 1}},
+            "boarding_time": {
+                "on_lines": {
+                    "penalty": "@iboard_penalty",
+                    "perception_factor": 1,
+                }
+            },
+            "in_vehicle_cost": None,
+            "in_vehicle_time": {"perception_factor": "@invehicle_factor"},
+            # walk and drive perception factors are specified in the mode
+            # definition "speed_or_time_factor"
+            "aux_transit_time": {"perception_factor": 1},
+            "aux_transit_cost": None,
+            "journey_levels": self._journey_levels,
+            "flow_distribution_between_lines": {"consider_total_impedance": False},
+            "flow_distribution_at_origins": {
+                "fixed_proportions_on_connectors": None,
+                "choices_at_origins": "OPTIMAL_STRATEGY",
+            },
+            "flow_distribution_at_regular_nodes_with_aux_transit_choices": {
+                "choices_at_regular_nodes": "OPTIMAL_STRATEGY"
+            },
+            "circular_lines": {"stay": False},
+            "connector_to_connector_path_prohibition": None,
+            "od_results": {"total_impedance": None},
+            "performance_settings": {"number_of_processors": self._num_processors},
+        }
+        if self._config.use_fares:
+            fare_perception = 60 / self._config.value_of_time
+            spec["boarding_cost"] = {
+                "on_segments": {
+                    "penalty": "@board_cost",
+                    "perception_factor": fare_perception,
+                }
+            }
+            spec["in_vehicle_cost"] = {
+                "penalty": "@invehicle_cost",
+                "perception_factor": fare_perception,
+            }
+        # Optional aux_transit_cost, used for walk time on connectors,
+        # set if override_connector_times is on
+        if self._config.get("override_connector_times", False):
+            spec["aux_transit_cost"] = {
+                "penalty": f"@walk_time_{self.name.lower()}",
+                "perception_factor": self._config.walk_perception_factor,
+            }
+        return spec
+
+    @property
+    def _demand_matrix(self) -> str:
+        # if self._iteration < 1:
+        #     return 'ms"zero"'  # zero demand matrix
+        return f'mf"TRN_{self._class_config.skim_set_id}_{self._time_period}"'
+
+    def _get_used_mode_ids(self, modes: List[TransitModeConfig]) -> List[str]:
+        """Get list of assignment mode IDs from an input list of Emme mode objects.
+
+        Accounts for the fare table mapping from each input mode ID to the set
+        of mode IDs auto-generated for the fare transition table (fares.far
+        input) by the apply fares component.
+ """ + if self._config.use_fares: + out_modes = set([]) + for mode in modes: + if mode.assign_type == "TRANSIT": + out_modes.update(self._fare_modes[mode.mode_id]) + else: + out_modes.add(mode.mode_id) + return list(out_modes) + return [mode.mode_id for mode in modes] + + @property + def _modes(self) -> List[str]: + """List of modes IDs (str) to use in assignment for this class.""" + all_modes = self._config.modes + mode_types = self._class_config.mode_types + modes = [mode for mode in all_modes if mode.type in mode_types] + return self._get_used_mode_ids(modes) + + @property + def _transit_modes(self) -> List[str]: + """List of transit modes IDs (str) to use in assignment for this class.""" + all_modes = self._config.modes + mode_types = self._class_config.mode_types + modes = [ + mode + for mode in all_modes + if mode.type in mode_types and mode.assign_type == "TRANSIT" + ] + return self._get_used_mode_ids(modes) + + @property + def fare_perception(self): + return 60 / self._config.value_of_time + + @property + def headway_fraction(self): + return 0.5 + + @property + def _journey_levels(self) -> EmmeTransitJourneyLevelSpec: + modes = self._transit_modes + effective_headway_source = self._config.effective_headway_source + if self._config.use_fares: + fare_perception = self.fare_perception + file_name = f"{self._time_period}_ALLPEN_journey_levels.ems" + with open( + os.path.join(self._spec_dir, file_name), "r", encoding="utf8" + ) as jl_spec: + journey_levels = _json.load(jl_spec)["journey_levels"] + + if self.name == "PNR_TRN_WLK": + new_journey_levels = copy.deepcopy(journey_levels) + + for i in range(0,len(new_journey_levels)): + jls = new_journey_levels[i] + for level in jls["transition_rules"]: + level["next_journey_level"] = level["next_journey_level"]+1 + jls["transition_rules"].extend( + [ + {'mode': 'e', 'next_journey_level': i+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': i+2}, + {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + # level 0: drive access + transition_rules_drive_access = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_drive_access: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_drive_access.extend( + [ + {'mode': 'e', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': 0}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'p', 'next_journey_level': 1} + ] + ) + # level 1: use transit + transition_rules_pnr = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_pnr: + level["next_journey_level"] = 2 + transition_rules_pnr.extend( + [ + {'mode': 'e', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'p', 'next_journey_level': 1} + ] + ) + # level len(new_journey_levels)+2: every mode is prohibited + transition_rules_prohibit = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_prohibit: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_prohibit.extend( + [ + {'mode': 'e', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + 
new_journey_levels.insert( + 0, + { + "description": "drive access", + "destinations_reachable": False, + "transition_rules": transition_rules_drive_access, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.insert( + 1, + { + "description": "pnr", + "destinations_reachable": False, + "transition_rules": transition_rules_pnr, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.append( + { + "description": "prohibit", + "destinations_reachable": False, + "transition_rules": transition_rules_prohibit, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + for level in new_journey_levels[2:-1]: + level["waiting_time"] = { + "headway_fraction": "@hdw_fraction", + "effective_headways": effective_headway_source, + "spread_factor": 1, + "perception_factor": "@wait_pfactor" + } + level["boarding_time"] = { + "on_lines": { + "penalty": "@xboard_penalty", "perception_factor": 1}, + "at_nodes": { + "penalty": "@xboard_nodepen", "perception_factor": 1}, + } + # add in the correct value of time parameter + for level in new_journey_levels: + if level["boarding_cost"]: + level["boarding_cost"]["on_segments"][ + "perception_factor" + ] = fare_perception + + elif self.name == "WLK_TRN_PNR": + new_journey_levels = copy.deepcopy(journey_levels) + + for i in range(0,len(new_journey_levels)): + jls = new_journey_levels[i] + jls["destinations_reachable"] = False + jls["transition_rules"].extend( + [ + {'mode': 'a', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': i+1}, + {'mode': 'p', 'next_journey_level': len(new_journey_levels)+1} + ] + ) + # level 0: walk access + transition_rules_walk_access = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_walk_access: + level["next_journey_level"] = 1 + transition_rules_walk_access.extend( + [ + {'mode': 'a', 'next_journey_level': 0}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + # level len(new_journey_levels)+1: drive home + transition_rules_drive_home = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_drive_home: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_drive_home.extend( + [ + {'mode': 'a', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+1}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + # level len(new_journey_levels)+2: every mode is prohibited + transition_rules_prohibit = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_prohibit: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_prohibit.extend( + [ + {'mode': 'a', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + new_journey_levels.insert( + 0, + { + "description": "walk access", + "destinations_reachable": True, + "transition_rules": transition_rules_walk_access, + "waiting_time": None, + 
"boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.append( + { + "description": "drive home", + "destinations_reachable": True, + "transition_rules": transition_rules_drive_home, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.append( + { + "description": "prohibit", + "destinations_reachable": False, + "transition_rules": transition_rules_prohibit, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + for level in new_journey_levels[1:-2]: + level["waiting_time"] = { + "headway_fraction": "@hdw_fraction", + "effective_headways": effective_headway_source, + "spread_factor": 1, + "perception_factor": "@wait_pfactor" + } + level["boarding_time"] = { + "on_lines": { + "penalty": "@xboard_penalty", "perception_factor": 1}, + "at_nodes": { + "penalty": "@xboard_nodepen", "perception_factor": 1}, + } + # add in the correct value of time parameter + for level in new_journey_levels: + if level["boarding_cost"]: + level["boarding_cost"]["on_segments"]["perception_factor"] = fare_perception + + elif self.name == "KNR_TRN_WLK": + new_journey_levels = copy.deepcopy(journey_levels) + + for i in range(0,len(new_journey_levels)): + jls = new_journey_levels[i] + for level in jls["transition_rules"]: + level["next_journey_level"] = level["next_journey_level"]+1 + jls["transition_rules"].extend( + [ + {'mode': 'e', 'next_journey_level': i+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': i+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + # level 0: drive access + transition_rules_drive_access = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_drive_access: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_drive_access.extend( + [ + {'mode': 'e', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': 0}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': 1} + ] + ) + # level 1: use transit + transition_rules_knr = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_knr: + level["next_journey_level"] = 2 + transition_rules_knr.extend( + [ + {'mode': 'e', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': 1} + ] + ) + # level len(new_journey_levels)+2: every mode is prohibited + transition_rules_prohibit = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_prohibit: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_prohibit.extend( + [ + {'mode': 'e', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + new_journey_levels.insert( + 0, + { + "description": "drive access", + "destinations_reachable": False, + "transition_rules": 
transition_rules_drive_access, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.insert( + 1, + { + "description": "knr", + "destinations_reachable": False, + "transition_rules": transition_rules_knr, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.append( + { + "description": "prohibit", + "destinations_reachable": False, + "transition_rules": transition_rules_prohibit, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + for level in new_journey_levels[2:-1]: + level["waiting_time"] = { + "headway_fraction": "@hdw_fraction", + "effective_headways": effective_headway_source, + "spread_factor": 1, + "perception_factor": "@wait_pfactor" + } + level["boarding_time"] = { + "on_lines": { + "penalty": "@xboard_penalty", "perception_factor": 1}, + "at_nodes": { + "penalty": "@xboard_nodepen", "perception_factor": 1}, + } + # add in the correct value of time parameter + for level in new_journey_levels: + if level["boarding_cost"]: + level["boarding_cost"]["on_segments"]["perception_factor"] = fare_perception + + elif self.name == "WLK_TRN_KNR": + new_journey_levels = copy.deepcopy(journey_levels) + + for i in range(0,len(new_journey_levels)): + jls = new_journey_levels[i] + jls["destinations_reachable"] = False + jls["transition_rules"].extend( + [ + {'mode': 'a', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': i+1}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': len(new_journey_levels)+1} + ] + ) + # level 0: walk access + transition_rules_walk_access = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_walk_access: + level["next_journey_level"] = 1 + transition_rules_walk_access.extend( + [ + {'mode': 'a', 'next_journey_level': 0}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + # level len(new_journey_levels)+1: drive home + transition_rules_drive_home = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_drive_home: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_drive_home.extend( + [ + {'mode': 'a', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+1}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + # level len(new_journey_levels)+2: every mode is prohibited + transition_rules_prohibit = copy.deepcopy(journey_levels[0]["transition_rules"]) + for level in transition_rules_prohibit: + level["next_journey_level"] = len(new_journey_levels)+2 + transition_rules_prohibit.extend( + [ + {'mode': 'a', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'D', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'w', 'next_journey_level': len(new_journey_levels)+2}, + ## {'mode': 'p', 'next_journey_level': len(new_journey_levels)+2}, + {'mode': 'k', 'next_journey_level': len(new_journey_levels)+2} + ] + ) + new_journey_levels.insert( + 0, + { + 
"description": "walk access", + "destinations_reachable": True, + "transition_rules": transition_rules_walk_access, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.append( + { + "description": "drive home", + "destinations_reachable": True, + "transition_rules": transition_rules_drive_home, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + new_journey_levels.append( + { + "description": "prohibit", + "destinations_reachable": False, + "transition_rules": transition_rules_prohibit, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + for level in new_journey_levels[1:-2]: + level["waiting_time"] = { + "headway_fraction": "@hdw_fraction", + "effective_headways": effective_headway_source, + "spread_factor": 1, + "perception_factor": "@wait_pfactor" + } + level["boarding_time"] = { + "on_lines": { + "penalty": "@xboard_penalty", "perception_factor": 1}, + "at_nodes": { + "penalty": "@xboard_nodepen", "perception_factor": 1}, + } + # add in the correct value of time parameter + for level in new_journey_levels: + if level["boarding_cost"]: + level["boarding_cost"]["on_segments"]["perception_factor"] = fare_perception + + elif self.name == "WLK_TRN_WLK": + new_journey_levels = copy.deepcopy(journey_levels) + transition_rules = copy.deepcopy(journey_levels[0]["transition_rules"]) + new_journey_levels.insert( + 0, + { + "description": "base", + "destinations_reachable": True, + "transition_rules": transition_rules, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": None + } + ) + for level in new_journey_levels[1:]: + level["waiting_time"] = { + "headway_fraction": "@hdw_fraction", + "effective_headways": effective_headway_source, + "spread_factor": 1, + "perception_factor": "@wait_pfactor" + } + level["boarding_time"] = { + "on_lines": { + "penalty": "@xboard_penalty", "perception_factor": 1}, + "at_nodes": { + "penalty": "@xboard_nodepen", "perception_factor": 1}, + } + # add in the correct value of time parameter + for level in new_journey_levels: + if level["boarding_cost"]: + level["boarding_cost"]["on_segments"]["perception_factor"] = fare_perception + + with open( + os.path.join( + self._spec_dir, + "%s_%s_journey_levels.ems" % (self._time_period, self.name) + ), + "w", + ) as jl_spec_file: + spec = {"type": "EXTENDED_TRANSIT_ASSIGNMENT", "journey_levels": new_journey_levels} + _json.dump(spec, jl_spec_file, indent=4) + + else: + new_journey_levels = [ + { + "description": "", + "destinations_reachable": True, + "transition_rules": [ + {"mode": m, "next_journey_level": 1} for m in modes + ], + }, + { + "description": "", + "destinations_reachable": True, + "transition_rules": [ + {"mode": m, "next_journey_level": 1} for m in modes + ], + "waiting_time": { + "headway_fraction": "@hdw_fraction", + "effective_headways": effective_headway_source, + "spread_factor": 1, + "perception_factor": "@wait_pfactor", + }, + }, + ] + for level in new_journey_levels[1:]: + level["boarding_time"] = { + "on_lines": { + "penalty": "@xboard_penalty", "perception_factor": 1}, + "at_nodes": { + "penalty": "@xboard_nodepen", "perception_factor": 1}, + } + + return new_journey_levels diff --git a/tm2py/components/network/transit/transit_network.py b/tm2py/components/network/transit/transit_network.py new file mode 100644 index 00000000..8422957c --- /dev/null +++ b/tm2py/components/network/transit/transit_network.py @@ -0,0 +1,1794 @@ +"""Transit network preparation module.""" + 
+from __future__ import annotations + +import json as _json +import os +import time as _time +import traceback as _traceback +from collections import defaultdict as _defaultdict +from copy import deepcopy as _copy +from typing import Dict + +import pandas as pd +import shapely.geometry as _geom +from inro.modeller import PageBuilder +from scipy.optimize import nnls as _nnls +from typing_extensions import TYPE_CHECKING, Literal + +from tm2py.components.component import Component +from tm2py.emme.manager import EmmeLink, EmmeNetwork, EmmeScenario +from tm2py.emme.network import NoPathFound, find_path +from tm2py.logger import LogStartEnd + +if TYPE_CHECKING: + from tm2py.controller import RunController + + +class PrepareTransitNetwork(Component): + """Transit assignment and skim-related network preparation.""" + + def __init__(self, controller: "RunController"): + """Constructor for PrepareTransitNetwork class. + + Args: + controller: The RunController instance. + """ + super().__init__(controller) + self.config = self.controller.config.transit + self._emme_manager = self.controller.emme_manager + self._transit_emmebank = None + self._transit_networks = None + self._transit_scenarios = None + self._highway_emmebank = None + self._highway_scenarios = None + self._auto_emmebank = None + self._auto_networks = None + self._auto_scenarios = None + self._access_connector_df = None + self._egress_connector_df = None + + @LogStartEnd( + "Prepare transit network attributes and update times from auto network." + ) + def run(self): + """Prepare transit network for assignment. + + Updates link travel times from auto network and + (if using TAZ-connectors for assignment) update connector walk times. + """ + if self.controller.iteration == 0: + for period in self.controller.time_period_names: + with self.logger.log_start_end(f"period {period}"): + scenario = self.transit_emmebank.scenario(period) + attributes = { + "TRANSIT_SEGMENT": [ + "@schedule_time", + "@trantime_seg", + "@board_cost", + "@invehicle_cost", + ], + } + for domain, attrs in attributes.items(): + for name in attrs: + attr = scenario.extra_attribute(name) + if attr is not None: + scenario.delete_extra_attribute(name) + scenario.create_extra_attribute(domain, name) + + network = self.transit_networks[period] + if self.config.get( + "override_connectors", False + ): # don't run prepare connector, connectors are created in lasso + self.prepare_connectors(network, period) + self.distribute_nntime(network) + self.update_link_trantime(network) + # self.calc_link_unreliability(network, period) + if self.config.use_fares: + self.apply_fares(scenario, network, period) + if self.config.get("split_connectors_to_prevent_walk", False): + self.split_tap_connectors_to_prevent_walk(network) + # TODO: missing the input data files for apply station attributes + # self.apply_station_attributes(input_dir, network) + scenario.publish_network(network) + + for time_period in self.time_period_names: + # self.update_auto_times(time_period) # run in transit_assign component + self._update_pnr_penalty(time_period) + if self.config.override_connector_times: + self._update_connector_times(time_period) + + def validate_inputs(self): + """Validate the inputs.""" + # TODO + + @property + def transit_emmebank(self): + if not self._transit_emmebank: + self._transit_emmebank = self.controller.emme_manager.transit_emmebank + return self._transit_emmebank + + @property + def highway_emmebank(self): + if not self._highway_emmebank: + self._highway_emmebank = 
self.controller.emme_manager.highway_emmebank
+        return self._highway_emmebank
+
+    @property
+    def transit_scenarios(self):
+        if self._transit_scenarios is None:
+            self._transit_scenarios = {
+                tp: self.transit_emmebank.scenario(tp) for tp in self.time_period_names
+            }
+        return self._transit_scenarios
+
+    @property
+    def highway_scenarios(self):
+        if self._highway_scenarios is None:
+            self._highway_scenarios = {
+                tp: self.highway_emmebank.scenario(tp) for tp in self.time_period_names
+            }
+        return self._highway_scenarios
+
+    @property
+    def transit_networks(self):
+        # if self._transit_networks is None:
+        self._transit_networks = {
+            tp: self.transit_scenarios[tp].get_network()
+            for tp in self.time_period_names
+        }
+        return self._transit_networks
+
+    @property
+    def access_connector_df(self):
+        if self._access_connector_df is None:
+            self._access_connector_df = pd.read_csv(
+                self.get_abs_path(self.config.input_connector_access_times_path)
+            )
+        return self._access_connector_df
+
+    @property
+    def egress_connector_df(self):
+        if self._egress_connector_df is None:
+            self._egress_connector_df = pd.read_csv(
+                self.get_abs_path(self.config.input_connector_egress_times_path)
+            )
+        return self._egress_connector_df
+
+    def update_auto_times(self, time_period: str):
+        """Update the auto travel times from the last auto assignment to the transit scenario.
+
+        TODO: Document the steps in more detail once they are fully understood.
+
+        Note: may need to remove "reliability" factor in future versions of VDF def
+
+        Args:
+            time_period: time period name abbreviation
+        """
+        _highway_link_dict = self._get_highway_links(time_period)
+        _transit_link_dict = self._get_transit_links(time_period)
+
+        for _link_id in _highway_link_dict.keys() & _transit_link_dict.keys():
+            auto_time = _highway_link_dict[_link_id].auto_time
+            area_type = _highway_link_dict[_link_id]["@area_type"]
+            # earlier approach: use @valuetoll_dam (cents/mile) to represent the drive alone toll
+            # sov_toll_per_mile = _highway_link_dict[_link_id]['@valuetoll_dam']
+            link_length = _transit_link_dict[_link_id].length
+            facility_type = _transit_link_dict[_link_id]["@ft"]
+            # sov_toll = sov_toll_per_mile * link_length / 100
+
+            # use @valuetoll_da to get the drive alone toll
+            sov_toll = _highway_link_dict[_link_id]["@valuetoll_da"]
+
+            _transit_link_dict[_link_id]["@drive_toll"] = sov_toll
+
+            if auto_time > 0:
+                # https://github.com/BayAreaMetro/travel-model-one/blob/master/model-files/scripts/skims/PrepHwyNet.job#L106
+                tran_speed = 60 * link_length / auto_time
+                if (facility_type <= 4 or facility_type == 8) and (tran_speed < 6):
+                    tran_speed = 6
+                    _transit_link_dict[_link_id]["@trantime"] = 60 * link_length / tran_speed
+                elif tran_speed < 3:
+                    tran_speed = 3
+                    _transit_link_dict[_link_id]["@trantime"] = 60 * link_length / tran_speed
+                else:
+                    _transit_link_dict[_link_id]["@trantime"] = auto_time
+                # data1 is the auto time used in Mixed-Mode transit assignment
+                _transit_link_dict[_link_id].data1 = (
+                    _transit_link_dict[_link_id]["@trantime"]
+                    + 60 * sov_toll / self.config.value_of_time
+                )
+                # bus time calculation
+                if facility_type in [1, 2, 3, 8]:
+                    delayfactor = 0.0
+                else:
+                    if area_type in [0, 1]:
+                        delayfactor = 2.46
+                    elif area_type in [2, 3]:
+                        delayfactor = 1.74
+                    elif area_type == 4:
+                        delayfactor = 1.14
+                    else:
+                        delayfactor = 0.08
+                bus_time = _transit_link_dict[_link_id]["@trantime"] + (delayfactor * link_length)
+                _transit_link_dict[_link_id]["@trantime"] = bus_time
+
+        # TODO document this! Consider copying to another method.
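+        # Summary of the loop above: for links shared by the highway and transit
+        # networks, @trantime starts from the assigned auto time, with speed
+        # floored at 6 mph on freeway-type facilities (@ft <= 4 or @ft == 8) and
+        # 3 mph elsewhere; data1 adds the drive-alone toll converted to minutes
+        # using the configured value_of_time; bus time then adds an area-type
+        # delay factor (2.46 down to 0.08 min/mile, zero on facility types
+        # 1, 2, 3, and 8) times link length.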
+        # set us1 (segment data1), used in ttf expressions, from @trantime
+        _transit_net = self._transit_networks[time_period]
+        _transit_scenario = self.transit_scenarios[time_period]
+
+        for segment in _transit_net.transit_segments():
+            # ? why would we only do this if schedule time was negative -- ES
+            if segment["@schedule_time"] <= 0 and segment.link is not None:
+                # ? what is "data1" and why do we need to update all these?
+                segment["data1"] = segment["@trantime_seg"] = segment.link["@trantime"]
+
+        _update_attributes = {
+            "TRANSIT_SEGMENT": ["@trantime_seg", "data1"],
+            "LINK": ["@trantime", "@drive_toll"],
+        }
+        self.emme_manager.copy_attribute_values(
+            _transit_net, _transit_scenario, _update_attributes
+        )
+
+    def _update_pnr_penalty(self, time_period: str):
+        """Add the parking penalties to pnr parking lots.
+
+        Args:
+            time_period: time period name abbreviation
+        """
+        _transit_net = self._transit_networks[time_period]
+        _transit_scenario = self.transit_scenarios[time_period]
+        deflator = self.config.fare_2015_to_2000_deflator
+
+        for segment in _transit_net.transit_segments():
+            if "BART_acc" in segment.id:
+                if "West Oakland" in segment.id:
+                    segment["@board_cost"] = 12.4 * deflator
+                else:
+                    segment["@board_cost"] = 3.0 * deflator
+            elif "Caltrain_acc" in segment.id:
+                segment["@board_cost"] = 5.5 * deflator
+
+        _update_attributes = {"TRANSIT_SEGMENT": ["@board_cost"]}
+        self.emme_manager.copy_attribute_values(
+            _transit_net, _transit_scenario, _update_attributes
+        )
+
+    def _initialize_link_attribute(self, time_period, attr_name):
+        """Delete and re-create a link attribute to reset it to its default value.
+
+        Args:
+            time_period: time period name abbreviation
+            attr_name: name of the link attribute to (re)initialize
+        """
+        _network = self._transit_networks[time_period]
+        _scenario = self._transit_scenarios[time_period]
+        if _scenario.extra_attribute(attr_name) is None:
+            _scenario.create_extra_attribute("LINK", attr_name)
+        if attr_name in _network.attributes("LINK"):
+            _network.delete_attribute("LINK", attr_name)
+        _network.create_attribute("LINK", attr_name, 9999)
+
+    def _update_connector_times(self, time_period: str):
+        """Set the connector times from the source connector times files.
+
+        See also _process_connector_file
+
+        Args:
+            time_period: time period name abbreviation
+        """
+        # walk time attributes per skim set
+        _scenario = self.transit_scenarios[time_period]
+        _network = self.transit_networks[time_period]
+        _update_node_attributes = {"NODE": ["@taz_id", "#node_id"]}
+
+        # ? what purpose do all these copy attribute values serve? why wouldn't taz and node
+        # ID already be there? -- ES
+        self.emme_manager.copy_attribute_values(
+            _scenario,
+            _network,
+            _update_node_attributes,
+        )
+
+        _transit_class_attr_map = {
+            _tclass.skim_set_id: f"@walk_time_{_tclass.name.lower()}"
+            for _tclass in self.config.classes
+        }
+
+        for attr_name in _transit_class_attr_map.values():
+            self._initialize_link_attribute(time_period, attr_name)
+
+        _connectors_df = self._get_centroid_connectors_as_df(time_period=time_period)
+        _access_df = self.access_connector_df.loc[
+            self.access_connector_df.time_period == time_period
+        ]
+        _egress_df = self.egress_connector_df.loc[
+            self.egress_connector_df.time_period == time_period
+        ]
+
+        # TODO check logic here. It was hard to follow previously.
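+        # Intended logic: each transit class has its own connector walk time
+        # attribute (@walk_time_<class name>, initialized above); the access and
+        # egress records for this time period are matched to the connector links
+        # on their (A, B) node IDs, the per-class walk times are written onto the
+        # links, and the updated attributes are copied back to the scenario.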
+        _walktime_attrs = [
+            f"@walk_time_{_tclass.name.lower()}" for _tclass in self.config.classes
+        ]
+
+        # access and egress connectors are distinct (A, B) pairs, so the two
+        # walk-time tables can be stacked and matched to the links in one merge
+        _times_df = pd.concat(
+            [
+                _access_df[["A", "B"] + _walktime_attrs],
+                _egress_df[["A", "B"] + _walktime_attrs],
+            ]
+        )
+        _connectors_df = _connectors_df.merge(_times_df, on=["A", "B"], how="left")
+
+        # messy because we can't access EmmeLink attributes in a dataframe-like way
+        for _, row in _connectors_df.iterrows():
+            for _attr in _walktime_attrs:
+                if not pd.isna(row[_attr]):
+                    row["link"][_attr] = row[_attr]
+
+        self.emme_manager.copy_attribute_values(
+            _network, _scenario, {"LINK": _walktime_attrs}
+        )
+
+    def _get_centroid_connectors_as_df(self, time_period: str) -> pd.DataFrame:
+        """Returns a dataframe of centroid connector links with their A and B node ids.
+
+        Args:
+            time_period (str): time period abbreviation.
+
+        Returns:
+            pd.DataFrame: DataFrame of centroid connectors.
+        """
+        # collect records in a list and build the frame once; DataFrame.append
+        # returns a copy rather than appending in place
+        records = []
+        # lookup adjacent stop ID (also accounts for connector splitting)
+        _network = self.transit_networks[time_period]
+        for zone in _network.centroids():
+            taz_id = int(zone["@taz_id"])
+            for link in zone.outgoing_links():
+                records.append(
+                    {"A": taz_id, "B": int(link.j_node["#node_id"]), "link": link}
+                )
+            for link in zone.incoming_links():
+                records.append(
+                    {"A": int(link.i_node["#node_id"]), "B": taz_id, "link": link}
+                )
+        return pd.DataFrame(records)
+
+    def _get_transit_links(
+        self,
+        time_period: str,
+    ):
+        """Create dictionary of link ids mapped to attributes.
+
+        Args:
+            time_period (str): time period abbreviation
+        """
+        _transit_scenario = self.transit_scenarios[time_period]
+        _transit_net = self.transit_networks[time_period]
+        transit_attributes = {
+            "LINK": ["#link_id", "@trantime", "@ft"],
+            "TRANSIT_SEGMENT": ["@schedule_time", "@trantime_seg", "data1"],
+        }
+        self.emme_manager.copy_attribute_values(
+            _transit_scenario, _transit_net, transit_attributes
+        )
+        _transit_link_dict = {
+            tran_link["#link_id"]: tran_link for tran_link in _transit_net.links()
+        }
+        return _transit_link_dict
+
+    def _get_highway_links(
+        self,
+        time_period: str,
+    ):
+        """Create dictionary of link ids mapped to auto travel times.
+
+        Args:
+            time_period (str): time period abbreviation
+        """
+        _highway_scenario = self.highway_scenarios[time_period]
+        if not _highway_scenario.has_traffic_results:
+            return {}
+        _highway_net = _highway_scenario.get_partial_network(
+            ["LINK"], include_attributes=False
+        )
+
+        highway_attributes = {
+            "LINK": [
+                "#link_id",
+                "auto_time",
+                "@lanes",
+                "@area_type",
+                "@valuetoll_da",
+            ]
+        }
+
+        self.emme_manager.copy_attribute_values(
+            _highway_scenario, _highway_net, highway_attributes
+        )
+        # TODO can we just get the link attributes as a DataFrame and merge them?
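+        # Keying both the transit and highway link dicts on #link_id lets
+        # update_auto_times intersect the two networks with a set operation on
+        # the keys rather than a positional merge.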
+ auto_link_dict = { + auto_link["#link_id"]: auto_link + for auto_link in _highway_net.links() + } + return auto_link_dict + + def prepare_connectors(self, network, period): + for node in network.centroids(): + for link in node.outgoing_links(): + network.delete_link(link.i_node, link.j_node) + for link in node.incoming_links(): + network.delete_link(link.i_node, link.j_node) + period_name = period.lower() + access_modes = set() + egress_modes = set() + for mode_data in self.controller.config.transit.modes: + if mode_data["type"] == "ACCESS": + access_modes.add(network.mode(mode_data["mode_id"])) + if mode_data["type"] == "EGRESS": + egress_modes.add(network.mode(mode_data["mode_id"])) + tazs = dict((int(n["@taz_id"]), n) for n in network.centroids()) + nodes = dict((int(n["#node_id"]), n) for n in network.regular_nodes()) + with open( + self.get_abs_path(self.config.input_connector_access_times_path), "r" + ) as f: + header = next(f).split(",") + for line in f: + tokens = line.split(",") + data = dict(zip(header, tokens)) + if data["time_period"].lower() == period_name: + taz = tazs[int(data["from_taz"])] + stop = nodes[int(data["to_stop"])] + if network.link(taz, stop) is None: + connector = network.create_link(taz, stop, access_modes) + with open( + self.get_abs_path(self.config.input_connector_egress_times_path), "r" + ) as f: + header = next(f).split(",") + for line in f: + tokens = line.split(",") + data = dict(zip(header, tokens)) + if data["time_period"].lower() == period_name: + taz = tazs[int(data["to_taz"])] + stop = nodes[int(data["from_stop"])] + if network.link(stop, taz) is None: + connector = network.create_link(stop, taz, egress_modes) + + def distribute_nntime(self, network): + for line in network.transit_lines(): + total_nntime = sum( + segment["@nntime"] for segment in line.segments(include_hidden=True) + ) + if total_nntime == 0: + continue + total_length = 0 + segments_for_current_nntime = [] + for segment in line.segments(include_hidden=True): + nntime = segment["@nntime"] + if nntime > 0: + for nn_seg in segments_for_current_nntime: + nn_seg["@schedule_time"] = nntime * ( + nn_seg.link.length / total_length + ) + segments_for_current_nntime = [] + total_length = 0 + segments_for_current_nntime.append(segment) + total_length += segment.link.length if segment.link else 0 + + @staticmethod + def update_link_trantime(network): + # if nntime exists, use that for ivtt, else use the link trantime + for line in network.transit_lines(): + for segment in line.segments(include_hidden=False): + if segment["@schedule_time"] > 0: + segment.data1 = segment["@trantime_seg"] = segment["@schedule_time"] + else: + segment.data1 = segment["@trantime_seg"] = segment.link["@trantime"] + segment.transit_time_func = 2 + + def split_tap_connectors_to_prevent_walk(self, network): + tap_stops = _defaultdict(lambda: []) + new_node_id = IDGenerator(1, network) + all_transit_modes = set( + [mode for mode in network.modes() if mode.type == "TRANSIT"] + ) + node_attributes = network.attributes("NODE") + node_attributes.remove("x") + node_attributes.remove("y") + link_attributes_reset = ["length"] + + mode_table = self.controller.config.transit.modes + walk_modes = set() + access_modes = set() + egress_modes = set() + for mode_data in mode_table: + if mode_data["type"] == "WALK": + walk_modes.add(network.mode(mode_data["mode_id"])) + if mode_data["type"] == "ACCESS": + access_modes.add(network.mode(mode_data["mode_id"])) + if mode_data["type"] == "EGRESS": + 
egress_modes.add(network.mode(mode_data["mode_id"])) + + # Mark TAP adjacent stops and split TAP connectors + for centroid in network.centroids(): + out_links = list(centroid.outgoing_links()) + for link in out_links: + real_stop = link.j_node + has_adjacent_transfer_links = False + has_adjacent_walk_links = False + for stop_link in real_stop.outgoing_links(): + if stop_link == link.reverse_link: + continue + if walk_modes.intersection(stop_link.modes): + has_adjacent_transfer_links = True + if egress_modes.intersection(stop_link.modes): + has_adjacent_walk_links = True + + if has_adjacent_transfer_links or has_adjacent_walk_links: + length = link.length + tap_stop = network.split_link( + centroid, + real_stop, + next(new_node_id), + include_reverse=True, + proportion=0.5, + ) + for attr in node_attributes: + tap_stop[attr] = real_stop[attr] + tap_stops[real_stop].append(tap_stop) + transit_access_links = [ + (real_stop, tap_stop), + (tap_stop, real_stop), + ] + for i_node, j_node in transit_access_links: + t_link = network.link(i_node, j_node) + if t_link is None: + t_link = network.create_link( + i_node, j_node, all_transit_modes + ) + else: + t_link.modes = all_transit_modes + for attr in link_attributes_reset: + t_link[attr] = 0 + egress_links = [ + (network.link(tap_stop, centroid), egress_modes), + (network.link(centroid, tap_stop), access_modes), + ] + for t_link, modes in egress_links: + if t_link is None: + continue + t_link.modes = modes + t_link.length = length + + line_attributes = network.attributes("TRANSIT_LINE") + seg_attributes = network.attributes("TRANSIT_SEGMENT") + # attributes referring to in-vehicle components which should be set to 0 on virtual stop segments + seg_invehicle_attrs = [ + "@invehicle_cost", + "data1", + "@trantime_seg", + "@schedule_time", + "@nntime", + ] + + # re-route the transit lines through the new TAP-stops + for line in network.transit_lines(): + # store segment data for re-routing + seg_data = {} + itinerary = [] + tap_segments = [] + for seg in line.segments(include_hidden=True): + seg_data[(seg.i_node, seg.j_node, seg.loop_index)] = dict( + (k, seg[k]) for k in seg_attributes + ) + itinerary.append(seg.i_node.number) + if seg.i_node in tap_stops and ( + seg.allow_boardings or seg.allow_alightings + ): + # insert tap_stop, real_stop loop after tap_stop + real_stop = seg.i_node + tap_access = [] + tap_egress = [] + for tap_stop in tap_stops[real_stop]: + itinerary.extend([tap_stop.number, real_stop.number]) + tap_access.append(len(itinerary) - 3) + tap_egress.append(len(itinerary) - 2) + real_seg = len(itinerary) - 1 + # track new segments to update stopping pattern + tap_segments.append( + {"access": tap_access, "egress": tap_egress, "real": real_seg} + ) + + if tap_segments: + # store line data for re-routing + line_data = dict((k, line[k]) for k in line_attributes) + line_data["id"] = line.id + line_data["vehicle"] = line.vehicle + # delete old line, then re-create on new, re-routed itinerary + network.delete_transit_line(line) + + new_line = network.create_transit_line( + line_data.pop("id"), line_data.pop("vehicle"), itinerary + ) + # copy line attributes back + for k, v in line_data.items(): + new_line[k] = v + # copy segment attributes back + for seg in new_line.segments(include_hidden=True): + data = seg_data.get((seg.i_node, seg.j_node, seg.loop_index), {}) + for k, v in data.items(): + seg[k] = v + # set boarding, alighting and dwell time on new tap access / egress segments + for tap_ref in tap_segments: + real_seg = 
new_line.segment(tap_ref["real"])
+                    for access_ref in tap_ref["access"]:
+                        access_seg = new_line.segment(access_ref)
+                        for k in seg_attributes:
+                            access_seg[k] = real_seg[k]
+                        access_seg.allow_boardings = False
+                        access_seg.allow_alightings = False
+                        access_seg.transit_time_func = 1  # special 0-cost ttf
+                        for attr_name in seg_invehicle_attrs:
+                            access_seg[attr_name] = 0
+                        access_seg.dwell_time = 0
+
+                    first_access_seg = new_line.segment(tap_ref["access"][0])
+                    first_access_seg.allow_alightings = real_seg.allow_alightings
+                    first_access_seg.dwell_time = real_seg.dwell_time
+
+                    for egress_ref in tap_ref["egress"]:
+                        egress_seg = new_line.segment(egress_ref)
+                        for k in seg_attributes:
+                            egress_seg[k] = real_seg[k]
+                        egress_seg.allow_boardings = real_seg.allow_boardings
+                        egress_seg.allow_alightings = real_seg.allow_alightings
+                        egress_seg.transit_time_func = 1  # special 0-cost ttf
+                        for attr_name in seg_invehicle_attrs:
+                            egress_seg[attr_name] = 0
+                        egress_seg.dwell_time = 0
+
+                    real_seg.allow_alightings = False
+                    real_seg.dwell_time = 0
+
+    def apply_fares(self, scenario, network, period):
+        apply_fares = ApplyFares(self.controller)
+        apply_fares.scenario = scenario
+        apply_fares.network = network
+        apply_fares.period = period
+        apply_fares.run()
+
+
+class IDGenerator(object):
+    """Generate available Node IDs."""
+
+    def __init__(self, start, network):
+        """Initialize the generator with a starting ID and the network whose existing node IDs must be avoided."""
+        self._number = start
+        self._network = network
+
+    def next(self):
+        """Return the next valid node ID number."""
+        while True:
+            if self._network.node(self._number) is None:
+                break
+            self._number += 1
+        return self._number
+
+    def __next__(self):
+        """Return the next valid node ID number."""
+        return self.next()
+
+
+class ApplyFares(Component):
+    def __init__(self, controller: RunController):
+        """Initialize component.
+ + Args: + controller: parent Controller object + """ + super().__init__(controller) + + self.scenario = None + self.network = None + self.period = "" + self.config = self.controller.config.transit + + self.dot_far_file = self.get_abs_path(self.config.fares_path) + self.fare_matrix_file = self.get_abs_path(self.config.fare_matrix_path) + + self._log = [] + + def validate_inputs(self): + # TODO + pass + + def run(self): + self._log = [] + faresystems = self.parse_dot_far_file() + fare_matrices = self.parse_fare_matrix_file() + + try: + # network = self.network = self.scenario.get_network() + network = self.network + self.create_attribute( + "TRANSIT_SEGMENT", "@board_cost", self.scenario, network + ) + self.create_attribute( + "TRANSIT_SEGMENT", "@invehicle_cost", self.scenario, network + ) + # identify the lines by faresystem + for line in network.transit_lines(): + fs_id = int(line["#faresystem"]) + try: + fs_data = faresystems[fs_id] + except KeyError: + self._log.append( + { + "type": "text", + "content": ( + f"Line {line.id} has #faresystem '{fs_id}' which was " + "not found in fares.far table" + ), + } + ) + continue + fs_data["LINES"].append(line) + fs_data["NUM LINES"] += 1 + fs_data["NUM SEGMENTS"] += len(list(line.segments())) + # Set final hidden segment allow_boardings to False so that the boarding cost is not + # calculated for this segment (has no next segment) + line.segment(-1).allow_boardings = False + + self._log.append({"type": "header", "content": "Base fares by faresystem"}) + for fs_id, fs_data in faresystems.items(): + self._log.append( + { + "type": "text", + "content": "FAREZONE {}: {} {}".format( + fs_id, fs_data["STRUCTURE"], fs_data["NAME"] + ), + } + ) + lines = fs_data["LINES"] + fs_data["MODE_SET"] = set(l.mode.id for l in lines) + fs_data["MODES"] = ", ".join(fs_data["MODE_SET"]) + if fs_data["NUM LINES"] == 0: + self._log.append( + { + "type": "text2", + "content": "No lines associated with this faresystem", + } + ) + elif fs_data["STRUCTURE"] == "FLAT": + self.generate_base_board(lines, fs_data["IBOARDFARE"]) + elif fs_data["STRUCTURE"] == "FROMTO": + fare_matrix = fare_matrices[fs_data["FAREMATRIX ID"]] + self.generate_fromto_approx(network, lines, fare_matrix, fs_data) + + self.faresystem_distances(faresystems) + faresystem_groups = self.group_faresystems(faresystems) + journey_levels, mode_map = self.generate_transfer_fares( + faresystems, faresystem_groups, network + ) + self.save_journey_levels("ALLPEN", journey_levels) + # local_modes = [] + # premium_modes = [] + # for mode in self.config.modes: + # if mode.type == "LOCAL": + # local_modes.extend(mode_map[mode.mode_id]) + # if mode.type == "PREMIUM": + # premium_modes.extend(mode_map[mode.mode_id]) + # local_levels = self.filter_journey_levels_by_mode( + # local_modes, journey_levels + # ) + # self.save_journey_levels("BUS", local_levels) + # premium_levels = self.filter_journey_levels_by_mode( + # premium_modes, journey_levels + # ) + # self.save_journey_levels("PREM", premium_levels) + + except Exception as error: + self._log.append({"type": "text", "content": "error during apply fares"}) + self._log.append({"type": "text", "content": str(error)}) + self._log.append({"type": "text", "content": _traceback.format_exc()}) + raise + finally: + log_content = [] + header = [ + "NUMBER", + "NAME", + "NUM LINES", + "NUM SEGMENTS", + "MODES", + "FAREMATRIX ID", + "NUM ZONES", + "NUM MATRIX RECORDS", + ] + for fs_id, fs_data in faresystems.items(): + log_content.append([str(fs_data.get(h, "")) for h in 
header]) + self._log.insert( + 0, + { + "content": log_content, + "type": "table", + "header": header, + "title": "Faresystem data", + }, + ) + + self.log_report() + self.log_text_report() + + self.scenario.publish_network(network) + + return journey_levels + + def parse_dot_far_file(self): + data = {} + numbers = [] + with open(self.dot_far_file, "r") as f: + for line in f: + fs_data = {} + word = [] + key = None + for c in line: + if key == "FAREFROMFS": + word.append(c) + elif c == "=": + key = "".join(word) + word = [] + elif c == ",": + fs_data[key.strip()] = "".join(word) + key = None + word = [] + elif c == "\n": + pass + else: + word.append(c) + fs_data[key.strip()] = "".join(word) + + fs_data["NUMBER"] = int(fs_data["FARESYSTEM NUMBER"]) + if fs_data["STRUCTURE"] != "FREE": + fs_data["FAREFROMFS"] = [ + float(x) for x in fs_data["FAREFROMFS"].split(",") + ] + if fs_data["STRUCTURE"] == "FLAT": + fs_data["IBOARDFARE"] = float(fs_data["IBOARDFARE"]) + elif fs_data["STRUCTURE"] == "FROMTO": + fmi, one, farematrix_id = fs_data["FAREMATRIX"].split(".") + fs_data["FAREMATRIX ID"] = int(farematrix_id) + fs_data["LINES"] = [] + fs_data["NUM LINES"] = 0 + fs_data["NUM SEGMENTS"] = 0 + numbers.append(fs_data["NUMBER"]) + + data[fs_data["NUMBER"]] = fs_data + for fs_data in data.values(): + if "FAREFROMFS" in fs_data: + fs_data["FAREFROMFS"] = dict(zip(numbers, fs_data["FAREFROMFS"])) + return data + + def parse_fare_matrix_file(self): + data = _defaultdict(lambda: _defaultdict(dict)) + with open(self.fare_matrix_file, "r") as f: + for i, line in enumerate(f): + if line: + tokens = line.split() + if len(tokens) != 4: + raise Exception( + "FareMatrix file line {}: expecting 4 values".format(i) + ) + system, orig, dest, fare = tokens + data[int(system)][int(orig)][int(dest)] = float(fare) + return data + + def generate_base_board(self, lines, board_fare): + self._log.append( + { + "type": "text2", + "content": "Set @board_cost to {} on {} lines".format( + board_fare, len(lines) + ), + } + ) + for line in lines: + for segment in line.segments(): + segment["@board_cost"] = board_fare + + def generate_fromto_approx(self, network, lines, fare_matrix, fs_data): + network.create_attribute("LINK", "invehicle_cost") + network.create_attribute("LINK", "board_cost") + farezone_warning1 = ( + "Warning: faresystem {} estimation: on line {}, node {} " + "does not have a valid @farezone ID. Using {} valid farezone {}." 
+ ) + + fs_data["NUM MATRIX RECORDS"] = 0 + valid_farezones = set(fare_matrix.keys()) + for mapping in fare_matrix.values(): + zones = list(mapping.keys()) + fs_data["NUM MATRIX RECORDS"] += len(zones) + valid_farezones.update(set(zones)) + fs_data["NUM ZONES"] = len(valid_farezones) + valid_fz_str = ", ".join([str(x) for x in valid_farezones]) + self._log.append( + { + "type": "text2", + "content": "{} valid zones: {}".format( + fs_data["NUM ZONES"], valid_fz_str + ), + } + ) + + valid_links = set([]) + zone_nodes = _defaultdict(lambda: set([])) + for line in lines: + prev_farezone = 0 + for seg in line.segments(include_hidden=True): + if seg.link: + valid_links.add(seg.link) + if seg.allow_alightings or seg.allow_boardings: + farezone = int(seg.i_node["@farezone"]) + if farezone not in valid_farezones: + if prev_farezone == 0: + # DH added first farezone fix instead of exception + prev_farezone = list(valid_farezones)[0] + src_msg = "first" + else: + src_msg = "previous" + farezone = prev_farezone + self._log.append( + { + "type": "text3", + "content": farezone_warning1.format( + fs_data["NUMBER"], + line, + seg.i_node, + src_msg, + prev_farezone, + ), + } + ) + else: + prev_farezone = farezone + zone_nodes[farezone].add(seg.i_node) + self._log.append( + { + "type": "text2", + "content": "Farezone IDs and node count: %s" + % (", ".join(["%s: %s" % (k, len(v)) for k, v in zone_nodes.items()])), + } + ) + + # Two cases: + # - zone / area fares with boundary crossings, different FS may overlap: + # handle on a line-by-line bases with boarding and incremental segment costs + # for local and regional bus lines + # - station-to-station fares + # handle as an isolated system with the same costs on for all segments on a link + # and from boarding nodes by direction. + # Used mostly for BART, but also used Amtrack, some ferries and express buses + # Can support multiple boarding stops with same farezone provided it is an isolated leg, + # e.g. BART zone 85 Oakland airport connector (when operated a bus with multiple stops). + + count_single_node_zones = 0.0 + count_multi_node_zones = 0.0 + for zone, nodes in zone_nodes.items(): + if len(nodes) > 1: + count_multi_node_zones += 1.0 + else: + count_single_node_zones += 1.0 + # use station-to-station approximation if >90% of zones are single node + is_area_fare = ( + count_multi_node_zones / (count_multi_node_zones + count_single_node_zones) + > 0.1 + ) + + if is_area_fare: + self.zone_boundary_crossing_approx( + lines, valid_farezones, fare_matrix, fs_data + ) + else: + self.station_to_station_approx( + valid_farezones, fare_matrix, zone_nodes, valid_links, network + ) + # copy costs from links to segments + for line in lines: + for segment in line.segments(): + segment["@invehicle_cost"] = max(segment.link.invehicle_cost, 0) + segment["@board_cost"] = max(segment.link.board_cost, 0) + + network.delete_attribute("LINK", "invehicle_cost") + network.delete_attribute("LINK", "board_cost") + + def zone_boundary_crossing_approx( + self, lines, valid_farezones, fare_matrix, fs_data + ): + farezone_warning1 = ( + "Warning: no value in fare matrix for @farezone ID %s " + "found on line %s at node %s (using @farezone from previous segment in itinerary)" + ) + farezone_warning2 = ( + "Warning: faresystem %s estimation on line %s: first node %s " + "does not have a valid @farezone ID. " + ) + farezone_warning3 = ( + "Warning: no entry in farematrix %s from-to %s-%s: board cost " + "at segment %s set to %s." 
+ ) + farezone_warning4 = ( + "WARNING: the above issue has occurred more than once for the same line. " + "There is a feasible boarding-alighting on the this line with no fare defined in " + "the fare matrix." + ) + farezone_warning5 = ( + "Warning: no entry in farematrix %s from-to %s-%s: " + "invehicle cost at segment %s set to %s" + ) + matrix_id = fs_data["FAREMATRIX ID"] + + self._log.append( + {"type": "text2", "content": "Using zone boundary crossings approximation"} + ) + for line in lines: + prev_farezone = 0 + same_farezone_missing_cost = False + # Get list of stop segments + stop_segments = [ + seg + for seg in line.segments(include_hidden=True) + if (seg.allow_alightings or seg.allow_boardings) + ] + prev_seg = None + for i, seg in enumerate(stop_segments): + farezone = int(seg.i_node["@farezone"]) + if farezone not in valid_farezones: + self._log.append( + { + "type": "text3", + "content": farezone_warning1 % (farezone, line, seg.i_node), + } + ) + if prev_farezone != 0: + farezone = prev_farezone + msg = "farezone from previous stop segment," + else: + # DH added first farezone fix instead of exception + farezone = list(valid_farezones)[0] + self._log.append( + { + "type": "text3", + "content": farezone_warning2 + % (fs_data["NUMBER"], line, seg.i_node), + } + ) + msg = "first valid farezone in faresystem," + self._log.append( + { + "type": "text3", + "content": "Using %s farezone %s" % (msg, farezone), + } + ) + if seg.allow_boardings: + # get the cost travelling within this farezone as base boarding cost + board_cost = fare_matrix.get(farezone, {}).get(farezone) + if board_cost is None: + # If this entry is missing from farematrix, + # use next farezone if both previous stop and next stop are in different farezones + if ( + i == len(stop_segments) - 1 + ): # in case the last segment has missing fare + board_cost = min(fare_matrix[farezone].values()) + else: + next_seg = stop_segments[i + 1] + next_farezone = next_seg.i_node["@farezone"] + if next_farezone != farezone and prev_farezone != farezone: + board_cost = fare_matrix.get(farezone, {}).get( + next_farezone + ) + if board_cost is None: + # use the smallest fare found from this farezone as best guess + # as a reasonable boarding cost + board_cost = min(fare_matrix[farezone].values()) + self._log.append( + { + "type": "text3", + "content": farezone_warning3 + % (matrix_id, farezone, farezone, seg, board_cost), + } + ) + if same_farezone_missing_cost == farezone: + self._log.append( + {"type": "text3", "content": farezone_warning4} + ) + same_farezone_missing_cost = farezone + seg["@board_cost"] = max(board_cost, seg["@board_cost"]) + + farezone = int(seg.i_node["@farezone"]) + # Set the zone-to-zone fare increment from the previous stop + if prev_farezone != 0 and farezone != prev_farezone: + try: + invehicle_cost = ( + fare_matrix[prev_farezone][farezone] + - prev_seg["@board_cost"] + ) + prev_seg["@invehicle_cost"] = max( + invehicle_cost, prev_seg["@invehicle_cost"] + ) + except KeyError: + self._log.append( + { + "type": "text3", + "content": farezone_warning5 + % (matrix_id, prev_farezone, farezone, prev_seg, 0), + } + ) + if farezone in valid_farezones: + prev_farezone = farezone + prev_seg = seg + + def station_to_station_approx( + self, valid_farezones, fare_matrix, zone_nodes, valid_links, network + ): + network.create_attribute("LINK", "board_index", -1) + network.create_attribute("LINK", "invehicle_index", -1) + self._log.append( + { + "type": "text2", + "content": "Using station-to-station least squares 
estimation", + } + ) + index = 0 + farezone_area_index = {} + for link in valid_links: + farezone = link.i_node["@farezone"] + if farezone not in valid_farezones: + continue + if len(zone_nodes[farezone]) == 1: + link.board_index = index + index += 1 + link.invehicle_index = index + index += 1 + else: + # in multiple station cases ALL boardings have the same index + if farezone not in farezone_area_index: + farezone_area_index[farezone] = index + index += 1 + link.board_index = farezone_area_index[farezone] + # only zone boundary crossing links get in-vehicle index + if ( + link.j_node["@farezone"] != farezone + and link.j_node["@farezone"] in valid_farezones + ): + link.invehicle_index = index + index += 1 + + A = [] + b = [] + pq_pairs = [] + + def lookup_node(z): + try: + return next(iter(zone_nodes[z])) + except StopIteration: + return None + + for p in valid_farezones: + q_costs = fare_matrix.get(p, {}) + orig_node = lookup_node(p) + for q in valid_farezones: + cost = q_costs.get(q, "n/a") + dest_node = lookup_node(q) + pq_pairs.append((p, q, orig_node, dest_node, cost)) + if q == p or orig_node is None or dest_node is None or cost == "n/a": + continue + try: + path_links = find_path( + orig_node, + dest_node, + lambda l: l in valid_links, + lambda l: l.length, + ) + except NoPathFound: + continue + b.append(cost) + a_indices = [0] * index + + a_indices[path_links[0].board_index] = 1 + for link in path_links: + if link.invehicle_index == -1: + continue + a_indices[link.invehicle_index] = 1 + A.append(a_indices) + + # x, res, rank, s = _np.linalg.lstsq(A, b, rcond=None) + # Use scipy non-negative least squares solver + x, rnorm = _nnls(A, b) + result = [round(i, 2) for i in x] + + header = ["Boarding node", "J-node", "Farezone", "Board cost", "Invehicle cost"] + table_content = [] + for link in valid_links: + if link.board_index != -1: + link.board_cost = result[link.board_index] + if link.invehicle_index != -1: + link.invehicle_cost = result[link.invehicle_index] + if link.board_cost or link.invehicle_cost: + table_content.append( + [ + link.i_node.id, + link.j_node.id, + int(link.i_node["@farezone"]), + link.board_cost, + link.invehicle_cost, + ] + ) + + self._log.append( + {"type": "text2", "content": "Table of boarding and in-vehicle costs"} + ) + self._log.append({"content": table_content, "type": "table", "header": header}) + network.delete_attribute("LINK", "board_index") + network.delete_attribute("LINK", "invehicle_index") + + # validation and reporting + header = ["p/q"] + table_content = [] + prev_p = None + row = None + for p, q, orig_node, dest_node, cost in pq_pairs: + if prev_p != p: + header.append(p) + if row: + table_content.append(row) + row = [p] + cost = "$%.2f" % cost if isinstance(cost, float) else cost + if orig_node is None or dest_node is None: + row.append("%s, UNUSED" % (cost)) + else: + try: + path_links = find_path( + orig_node, + dest_node, + lambda l: l in valid_links, + lambda l: l.length, + ) + path_cost = path_links[0].board_cost + sum( + l.invehicle_cost for l in path_links + ) + row.append("%s, $%.2f" % (cost, path_cost)) + except NoPathFound: + row.append("%s, NO PATH" % (cost)) + prev_p = p + table_content.append(row) + + self._log.append( + { + "type": "text2", + "content": "Table of origin station p to destination station q input cost, estimated cost", + } + ) + self._log.append({"content": table_content, "type": "table", "header": header}) + + def create_attribute(self, domain, name, scenario=None, network=None, atype=None): + if scenario: + if 
atype is None: + if scenario.extra_attribute(name): + scenario.delete_extra_attribute(name) + scenario.create_extra_attribute(domain, name) + else: + if scenario.network_field(domain, name): + scenario.delete_network_field(domain, name) + scenario.create_network_field(domain, name, atype) + if network: + if name in network.attributes(domain): + network.delete_attribute(domain, name) + network.create_attribute(domain, name) + + def faresystem_distances(self, faresystems): + max_xfer_dist = self.config.fare_max_transfer_distance_miles * 5280.0 + self._log.append({"type": "header", "content": "Faresystem distances"}) + self._log.append( + {"type": "text2", "content": "Max transfer distance: %s" % max_xfer_dist} + ) + + def bounding_rect(shape): + if shape.bounds: + x_min, y_min, x_max, y_max = shape.bounds + return _geom.Polygon( + [(x_min, y_max), (x_max, y_max), (x_max, y_min), (x_min, y_min)] + ) + return _geom.Point() + + for fs_index, fs_data in enumerate(faresystems.values()): + stops = set([]) + for line in fs_data["LINES"]: + for stop in line.segments(True): + if stop.allow_alightings or stop.allow_boardings: + stops.add(stop.i_node) + fs_data["shape"] = _geom.MultiPoint([(stop.x, stop.y) for stop in stops]) + # fs_data["bounding_rect"] = bounding_rect(fs_data["shape"]) + fs_data["NUM STOPS"] = len(fs_data["shape"]) + fs_data["FS_INDEX"] = fs_index + + # get distances between every pair of zone systems + # determine transfer fares which are too far away to be used + for fs_id, fs_data in faresystems.items(): + fs_data["distance"] = [] + fs_data["xfer_fares"] = xfer_fares = {} + for fs_id2, fs_data2 in faresystems.items(): + if fs_data["NUM LINES"] == 0 or fs_data2["NUM LINES"] == 0: + distance = "n/a" + elif fs_id == fs_id2: + distance = 0 + else: + # Get distance between bounding boxes as first approximation + # distance = fs_data["bounding_rect"].distance(fs_data2["bounding_rect"]) + # if distance <= max_xfer_dist: + # if within tolerance get more precise distance between all stops + distance = fs_data["shape"].distance(fs_data2["shape"]) + fs_data["distance"].append(distance) + + if distance == "n/a" or distance > max_xfer_dist: + xfer = "TOO_FAR" + elif fs_data2["STRUCTURE"] == "FREE": + xfer = 0.0 + elif fs_data2["STRUCTURE"] == "FROMTO": + # Transfering to the same FS in fare matrix is ALWAYS free + # for the farezone approximation + if fs_id == fs_id2: + xfer = 0.0 + if fs_data2["FAREFROMFS"][fs_id] != 0: + self._log.append( + { + "type": "text3", + "content": "Warning: non-zero transfer within 'FROMTO' faresystem not supported", + } + ) + else: + xfer = "BOARD+%s" % fs_data2["FAREFROMFS"][fs_id] + else: + xfer = fs_data2["FAREFROMFS"][fs_id] + xfer_fares[fs_id2] = xfer + + distance_table = [["p/q"] + list(faresystems.keys())] + for fs, fs_data in faresystems.items(): + distance_table.append( + [fs] + + [ + ("%.0f" % d if isinstance(d, float) else d) + for d in fs_data["distance"] + ] + ) + self._log.append( + { + "type": "text2", + "content": "Table of distance between stops in faresystems (feet)", + } + ) + self._log.append({"content": distance_table, "type": "table"}) + + def group_faresystems(self, faresystems): + self._log.append( + {"type": "header", "content": "Faresystem groups for ALL MODES"} + ) + + def matching_xfer_fares(xfer_fares_list1, xfer_fares_list2): + for xfer_fares1 in xfer_fares_list1: + for xfer_fares2 in xfer_fares_list2: + for fs_id, fare1 in xfer_fares1.items(): + fare2 = xfer_fares2[fs_id] + if fare1 != fare2 and ( + fare1 != "TOO_FAR" and fare2 != 
"TOO_FAR" + ): + # if the difference between two fares are less than a number, + # then treat them as the same fare + if isinstance(fare1, float) and isinstance(fare2, float) and ( + abs(fare1 - fare2)<=2.0 + ): + continue + else: + return False + return True + + # group faresystems together which have the same transfer-to pattern, + # first pass: only group by matching mode patterns to minimize the number + # of levels with multiple modes + group_xfer_fares_mode = [] + for fs_id, fs_data in faresystems.items(): + fs_modes = fs_data["MODE_SET"] + if not fs_modes: + continue + xfers = fs_data["xfer_fares"] + is_matched = False + for xfer_fares_list, group, modes in group_xfer_fares_mode: + # only if mode sets match + if set(fs_modes) == set(modes): + is_matched = matching_xfer_fares([xfers], xfer_fares_list) + if is_matched: + group.append(fs_id) + xfer_fares_list.append(xfers) + modes.extend(fs_modes) + break + if not is_matched: + group_xfer_fares_mode.append(([xfers], [fs_id], list(fs_modes))) + + # second pass attempt to group together this set + # to minimize the total number of levels and modes + group_xfer_fares = [] + for xfer_fares_list, group, modes in group_xfer_fares_mode: + is_matched = False + for xfer_fares_listB, groupB, modesB in group_xfer_fares: + is_matched = matching_xfer_fares(xfer_fares_list, xfer_fares_listB) + if is_matched: + xfer_fares_listB.extend(xfer_fares_list) + groupB.extend(group) + modesB.extend(modes) + break + if not is_matched: + group_xfer_fares.append((xfer_fares_list, group, modes)) + + self._log.append( + { + "type": "header", + "content": "Faresystems grouped by compatible transfer fares", + } + ) + xfer_fares_table = [["p/q"] + list(faresystems.keys())] + faresystem_groups = [] + i = 0 + for xfer_fares_list, group, modes in group_xfer_fares: + xfer_fares = {} + for fs_id in faresystems.keys(): + to_fares = [f[fs_id] for f in xfer_fares_list if f[fs_id] != "TOO_FAR"] + # fare = to_fares[0] if len(to_fares) > 0 else 0.0 + if len(to_fares) == 0: + fare = 0.0 + elif all(isinstance(item, float) for item in to_fares): + # caculate the average here becasue of the edits in matching_xfer_fares function + fare = round(sum(to_fares)/len(to_fares),2) + else: + fare = to_fares[0] + xfer_fares[fs_id] = fare + faresystem_groups.append((group, xfer_fares)) + for fs_id in group: + xfer_fares_table.append( + [fs_id] + list(faresystems[fs_id]["xfer_fares"].values()) + ) + i += 1 + self._log.append( + { + "type": "text2", + "content": "Level %s faresystems: %s modes: %s" + % ( + i, + ", ".join([str(x) for x in group]), + ", ".join([str(m) for m in modes]), + ), + } + ) + + self._log.append( + { + "type": "header", + "content": "Transfer fares list by faresystem, sorted by group", + } + ) + self._log.append({"content": xfer_fares_table, "type": "table"}) + + return faresystem_groups + + def generate_transfer_fares(self, faresystems, faresystem_groups, network): + self.create_attribute("MODE", "#orig_mode", self.scenario, network, "STRING") + self.create_attribute( + "TRANSIT_LINE", "#src_mode", self.scenario, network, "STRING" + ) + self.create_attribute( + "TRANSIT_LINE", "#src_veh", self.scenario, network, "STRING" + ) + + transit_modes = set([m for m in network.modes() if m.type == "TRANSIT"]) + #remove PNR dummy route from transit modes + transit_modes -= set([m for m in network.modes() if m.description == "pnrdummy"]) + mode_desc = {m.id: m.description for m in transit_modes} + get_mode_id = network.available_mode_identifier + get_vehicle_id = 
network.available_transit_vehicle_identifier + + meta_mode = network.create_mode("TRANSIT", get_mode_id()) + meta_mode.description = "Meta mode" + for link in network.links(): + if link.modes.intersection(transit_modes): + link.modes |= set([meta_mode]) + lines = _defaultdict(lambda: []) + for line in network.transit_lines(): + if line.mode.id != "p": #remove PNR dummy mode + lines[line.vehicle.id].append(line) + line["#src_mode"] = line.mode.id + line["#src_veh"] = line.vehicle.id + for vehicle in network.transit_vehicles(): + if vehicle.mode.id != "p": #remove PNR dummy mode + temp_veh = network.create_transit_vehicle(get_vehicle_id(), vehicle.mode.id) + veh_id = vehicle.id + attributes = {a: vehicle[a] for a in network.attributes("TRANSIT_VEHICLE")} + for line in lines[veh_id]: + line.vehicle = temp_veh + network.delete_transit_vehicle(vehicle) + new_veh = network.create_transit_vehicle(veh_id, meta_mode.id) + for a, v in attributes.items(): + new_veh[a] = v + for line in lines[veh_id]: + line.vehicle = new_veh + network.delete_transit_vehicle(temp_veh) + for link in network.links(): + link.modes -= transit_modes + for mode in transit_modes: + network.delete_mode(mode) + + # transition rules will be the same for every journey level + transition_rules = [] + journey_levels = [ + # { + # "description": "base", + # "destinations_reachable": True, + # "transition_rules": transition_rules, + # "waiting_time": None, + # "boarding_time": None, + # "boarding_cost": None, + # } + ] + mode_map = _defaultdict(lambda: []) + level = 1 + for fs_ids, xfer_fares in faresystem_groups: + boarding_cost_id = "@from_level_%s" % level + self.create_attribute( + "TRANSIT_SEGMENT", boarding_cost_id, self.scenario, network + ) + journey_levels.append( + { + "description": "Level_%s fs: %s" + % (level, ",".join([str(x) for x in fs_ids])), + "destinations_reachable": True, + "transition_rules": transition_rules, + "waiting_time": None, + "boarding_time": None, + "boarding_cost": { + "global": None, + "at_nodes": None, + "on_lines": None, + "on_segments": { + "penalty": boarding_cost_id, + "perception_factor": 1, + }, + }, + } + ) + + level_modes = {} + level_vehicles = {} + for fs_id in fs_ids: + fs_data = faresystems[fs_id] + for line in fs_data["LINES"]: + level_mode = level_modes.get(line["#src_mode"]) + if level_mode is None: + level_mode = network.create_mode("TRANSIT", get_mode_id()) + level_mode.description = mode_desc[line["#src_mode"]] + level_mode["#orig_mode"] = line["#src_mode"] + transition_rules.append( + {"mode": level_mode.id, "next_journey_level": level} + ) + level_modes[line["#src_mode"]] = level_mode + mode_map[line["#src_mode"]].append(level_mode.id) + for segment in line.segments(): + segment.link.modes |= set([level_mode]) + new_vehicle = level_vehicles.get(line.vehicle.id) + if new_vehicle is None: + new_vehicle = network.create_transit_vehicle( + get_vehicle_id(), level_mode + ) + for a in network.attributes("TRANSIT_VEHICLE"): + new_vehicle[a] = line.vehicle[a] + level_vehicles[line.vehicle.id] = new_vehicle + line.vehicle = new_vehicle + + # set boarding cost on all lines + # xferfares is a list of transfer fares, as a number or a string "BOARD+" + a number + for line in network.transit_lines(): + to_faresystem = int(line["#faresystem"]) + try: + xferboard_cost = xfer_fares[to_faresystem] + except KeyError: + continue # line does not have a valid faresystem ID + if xferboard_cost == "TOO_FAR": + pass # use zero cost as transfer from this fs to line is impossible + elif 
isinstance(xferboard_cost, str) and xferboard_cost.startswith( + "BOARD+" + ): + xferboard_cost = float(xferboard_cost[6:]) + for segment in line.segments(): + if segment.allow_boardings: + segment[boarding_cost_id] = max( + xferboard_cost + segment["@board_cost"], 0 + ) + else: + for segment in line.segments(): + if segment.allow_boardings: + segment[boarding_cost_id] = max(xferboard_cost, 0) + level += 1 + + # for vehicle in network.transit_vehicles(): + # if vehicle.mode == meta_mode: + # network.delete_transit_vehicle(vehicle) + # for link in network.links(): + # link.modes -= set([meta_mode]) + # network.delete_mode(meta_mode) + self._log.append( + { + "type": "header", + "content": "Mapping from original modes to modes for transition table", + } + ) + for orig_mode, new_modes in mode_map.items(): + self._log.append( + { + "type": "text2", + "content": "%s : %s" % (orig_mode, ", ".join(new_modes)), + } + ) + return journey_levels, mode_map + + def save_journey_levels(self, name, journey_levels): + spec_dir = os.path.join( + os.path.dirname( + self.get_abs_path(self.controller.config.emme.project_path) + ), + "Specifications", + ) + path = os.path.join(spec_dir, "%s_%s_journey_levels.ems" % (self.period, name)) + with open(path, "w") as jl_spec_file: + spec = { + "type": "EXTENDED_TRANSIT_ASSIGNMENT", + "journey_levels": journey_levels, + } + _json.dump(spec, jl_spec_file, indent=4) + + def filter_journey_levels_by_mode(self, modes, journey_levels): + # remove rules for unused modes from provided journey_levels + # (restrict to provided modes) + journey_levels = _copy(journey_levels) + for level in journey_levels: + rules = level["transition_rules"] + rules = [_copy(r) for r in rules if r["mode"] in modes] + level["transition_rules"] = rules + # count level transition rules references to find unused levels + num_levels = len(journey_levels) + level_count = [0] * num_levels + + def follow_rule(next_level): + level_count[next_level] += 1 + if level_count[next_level] > 1: + return + for rule in journey_levels[next_level]["transition_rules"]: + follow_rule(rule["next_journey_level"]) + + follow_rule(0) + # remove unreachable levels + # and find new index for transition rules for remaining levels + level_map = {i: i for i in range(num_levels)} + for level_id, count in reversed(list(enumerate(level_count))): + if count == 0: + for index in range(level_id, num_levels): + level_map[index] -= 1 + del journey_levels[level_id] + # re-index remaining journey_levels + for level in journey_levels: + for rule in level["transition_rules"]: + next_level = rule["next_journey_level"] + rule["next_journey_level"] = level_map[next_level] + return journey_levels + + def log_report(self): + # manager = self.controller.emme_manager + # emme_project = manager.project + # manager.modeller(emme_project) + # PageBuilder = _m.PageBuilder + report = PageBuilder(title="Fare calculation report") + try: + for item in self._log: + if item["type"] == "header": + report.add_html( + "

<h3>%s</h3>" % item["content"]
+                    )
+                elif item["type"] == "text":
+                    report.add_html(
+                        "<div>%s</div>" % item["content"]
+                    )
+                elif item["type"] == "text2":
+                    report.add_html(
+                        "<div style='margin-left:20px'>%s</div>" % item["content"]
+                    )
+                elif item["type"] == "text3":
+                    report.add_html(
+                        "<div style='margin-left:40px'>%s</div>" % item["content"]
+                    )
+                elif item["type"] == "table":
+                    table_msg = []
+                    if "header" in item:
+                        table_msg.append("<tr>")
+                        for label in item["header"]:
+                            table_msg.append("<th>%s</th>" % label)
+                        table_msg.append("</tr>")
+                    for row in item["content"]:
+                        table_msg.append("<tr>")
+                        for cell in row:
+                            table_msg.append("<td>%s</td>" % cell)
+                        table_msg.append("</tr>")
+                    title = "<h3>%s</h3>" % item["title"] if "title" in item else ""
+                    report.add_html(
+                        """
+                        <div>
+                            %s
+                            <table>%s</table>
+                        </div>
+                        <br>
+ """ + % (title, "".join(table_msg)) + ) + + except Exception as error: + # no raise during report to avoid masking real error + report.add_html("Error generating report") + report.add_html(str(error)) + report.add_html(_traceback.format_exc()) + + self.controller.emme_manager.logbook_write( + "Apply fares report %s" % self.period, report.render() + ) + + def log_text_report(self): + bank_dir = os.path.dirname( + self.get_abs_path(self.controller.config.emme.transit_database_path) + ) + timestamp = _time.strftime("%Y%m%d-%H%M%S") + path = os.path.join( + bank_dir, "apply_fares_report_%s_%s.txt" % (self.period, timestamp) + ) + with open(path, "w") as report: + try: + for item in self._log: + if item["type"] == "header": + report.write("\n%s\n" % item["content"]) + report.write("-" * len(item["content"]) + "\n\n") + elif item["type"] == "text": + report.write(" %s\n" % item["content"]) + elif item["type"] == "text2": + report.write(" %s\n" % item["content"]) + elif item["type"] == "text3": + report.write(" %s\n" % item["content"]) + elif item["type"] == "table": + table_msg = [] + cell_length = [0] * len(item["content"][0]) + if "header" in item: + for i, label in enumerate(item["header"]): + cell_length[i] = max(cell_length[i], len(str(label))) + for row in item["content"]: + for i, cell in enumerate(row): + cell_length[i] = max(cell_length[i], len(str(cell))) + if "header" in item: + row_text = [] + for label, length in zip(item["header"], cell_length): + row_text.append("%-*s" % (length, label)) + table_msg.append(" ".join(row_text)) + for row in item["content"]: + row_text = [] + for cell, length in zip(row, cell_length): + row_text.append("%-*s" % (length, cell)) + table_msg.append(" ".join(row_text)) + if "title" in item: + report.write("%s\n" % item["title"]) + report.write("-" * len(item["title"]) + "\n") + table_msg.extend(["", ""]) + report.write("\n".join(table_msg)) + except Exception as error: + # no raise during report to avoid masking real error + report.write("Error generating report\n") + report.write(str(error) + "\n") + report.write(_traceback.format_exc()) diff --git a/tm2py/components/network/transit/transit_skim.py b/tm2py/components/network/transit/transit_skim.py index 051044be..4a0c870d 100644 --- a/tm2py/components/network/transit/transit_skim.py +++ b/tm2py/components/network/transit/transit_skim.py @@ -1,9 +1,1010 @@ -"""Transit skims module""" +"""Transit skims module.""" -from ...component import Component +from __future__ import annotations -# from ....controller import RunController +import itertools +import os +from collections import defaultdict, namedtuple +from contextlib import contextmanager as _context +from math import inf +from time import time +from typing import TYPE_CHECKING, Collection, Dict, List, Tuple, Union + +import numpy as np + +from tm2py import tools +from tm2py.components.component import Component +from tm2py.emme.matrix import MatrixCache, OMXManager +from tm2py.logger import LogStartEnd +from tm2py.omx import NumpyArray + +if TYPE_CHECKING: + from tm2py.controller import RunController + +Skimproperty = namedtuple("Skimproperty", "name desc") class TransitSkim(Component): - """Run transit skims""" + """Transit skim calculation methods.""" + + def __init__(self, controller: "RunController"): + """Constructor for TransitSkim class. + + Args: + controller: The RunController instance. 
+ """ + super().__init__(controller) + self.config = self.controller.config.transit + self._emmebank = None + + self._networks = None + self._scenarios = None + self._matrix_cache = None + self._skim_properties = None + self._skim_matrices = { + k: None + for k in itertools.product( + self.time_period_names, + self.config.classes, + self.skim_properties, + ) + } + + def validate_inputs(self): + """Validate inputs.""" + # TODO add input validation + pass + + @property + def emmebank(self): + if not self._emmebank: + self._emmebank = self.controller.emme_manager.transit_emmebank + return self._emmebank + + @property + def scenarios(self): + if self._scenarios is None: + self._scenarios = { + tp: self.emmebank.scenario(tp) for tp in self.time_period_names + } + return self._scenarios + + @property + def networks(self): + if self._networks is None: + self._networks = { + tp: self.scenarios[tp].get_partial_network( + ["TRANSIT_SEGMENT"], include_attributes=False + ) + for tp in self.time_period_names + } + return self._networks + + @property + def matrix_cache(self): + if self._matrix_cache is None: + self._matrix_cache = { + tp: MatrixCache(self.scenarios[tp]) for tp in self.time_period_names + } + return self._matrix_cache + + @LogStartEnd("Transit skims") + def run(self): + """Run transit skims.""" + self.emmebank_skim_matrices( + self.time_period_names, self.config.classes, self.skim_properties + ) + with self.logger.log_start_end(f"period transit skims"): + for _time_period in self.time_period_names: + with self.controller.emme_manager.logbook_trace( + f"Transit skims for period {_time_period}" + ): + for _transit_class in self.config.classes: + self.run_skim_set(_time_period, _transit_class) + self._export_skims(_time_period, _transit_class) + if self.logger.debug_enabled: + self._log_debug_report(_time_period) + + @property + def skim_matrices(self): + return self._skim_matrices + + @property + def skim_properties(self): + """List of Skim Property named tuples: name, description. + + TODO put these in config. 
+        """
+        if self._skim_properties is None:
+            # TODO config
+            self._skim_properties = []
+
+            _basic_skims = [
+                ("IWAIT", "first wait time"),
+                ("XWAIT", "transfer wait time"),
+                ("WAIT", "total wait time"),
+                ("FARE", "fare"),
+                ("BOARDS", "num boardings"),
+                ("WAUX", "auxiliary walk time"),
+                ("DTIME", "access and egress drive time"),
+                ("DDIST", "access and egress drive distance"),
+                ("WACC", "access walk time"),
+                ("WEGR", "egress walk time"),
+                ("IVT", "total in-vehicle time"),
+                ("IN_VEHICLE_COST", "in-vehicle cost"),
+            ]
+            self._skim_properties += [
+                Skimproperty(_name, _desc) for _name, _desc in _basic_skims
+            ]
+            for mode in self.config.modes:
+                if (mode.assign_type == "TRANSIT") and (mode.type != "PNR_dummy"):
+                    desc = mode.description or mode.name
+                    self._skim_properties.append(
+                        Skimproperty(
+                            f"IVT{mode.name}",
+                            f"{desc} in-vehicle travel time"[:40],
+                        )
+                    )
+            if self.config.use_ccr:
+                self._skim_properties.extend(
+                    [
+                        Skimproperty("LINKREL", "Link reliability"),
+                        Skimproperty("CROWD", "Crowding penalty"),
+                        Skimproperty("EAWT", "Extra added wait time"),
+                        Skimproperty("CAPPEN", "Capacity penalty"),
+                    ]
+                )
+            if self.config.congested_transit_assignment:
+                self._skim_properties.extend(
+                    [
+                        Skimproperty("CROWD", "Crowding penalty"),
+                        Skimproperty("TRIM", "used to trim demands"),
+                        Skimproperty("XBOATIME", "transfer boarding time penalty"),
+                        Skimproperty("DTOLL", "drive access or egress toll price"),
+                    ]
+                )
+        return self._skim_properties
+
+    def emmebank_skim_matrices(
+        self,
+        time_periods: List[str] = None,
+        transit_classes=None,
+        skim_properties: List[Skimproperty] = None,
+    ) -> dict:
+        """Gets skim matrices from emmebank, or lazily creates them if they don't already exist."""
+        create_matrix = self.controller.emme_manager.tool(
+            "inro.emme.data.matrix.create_matrix"
+        )
+        if time_periods is None:
+            time_periods = self.time_period_names
+        if not set(time_periods).issubset(set(self.time_period_names)):
+            raise ValueError(
+                f"time_periods ({time_periods}) must be subset of time_period_names ({self.time_period_names})."
+            )
+
+        if transit_classes is None:
+            transit_classes = self.config.classes
+        if not set(transit_classes).issubset(set(self.config.classes)):
+            raise ValueError(
+                f"transit_classes ({transit_classes}) must be subset of config.classes ({self.config.classes})."
+            )
+
+        if skim_properties is None:
+            skim_properties = self.skim_properties
+        if not set(skim_properties).issubset(set(self.skim_properties)):
+            raise ValueError(
+                f"skim_properties ({skim_properties}) must be subset of self.skim_properties ({self.skim_properties})."
+            )
+
+        _tp_tclass_skprop = itertools.product(
+            time_periods, transit_classes, skim_properties
+        )
+        _tp_tclass_skprop_list = []
+
+        for _tp, _tclass, _skprop in _tp_tclass_skprop:
+            _name = f"{_tp}_{_tclass.name}_{_skprop.name}"
+            _desc = f"{_tp} {_tclass.description}: {_skprop.desc}"
+            _matrix = self.scenarios[_tp].emmebank.matrix(f'mf"{_name}"')
+            if not _matrix:
+                _matrix = create_matrix(
+                    "mf", _name, _desc, scenario=self.scenarios[_tp], overwrite=True
+                )
+            else:
+                _matrix.description = _desc
+
+            self._skim_matrices[_name] = _matrix
+            _tp_tclass_skprop_list.append(_name)
+
+        skim_matrices = {
+            k: v
+            for k, v in self._skim_matrices.items()
+            if k in _tp_tclass_skprop_list
+        }
+        return skim_matrices
+
+    def run_skim_set(self, time_period: str, transit_class: str):
+        """Run the transit skim calculations for a given time period and assignment class.
+
+        Results are stored in the transit emmebank.
+
+        Steps:
+        1. determine if using transit capacity constraint
+        2. skim walk, wait time, boardings, and fares
+        3. skim in-vehicle time by mode
+        4. mask transfers above the max amount
+        5. mask results that lack the required modes
+        """
+        use_ccr = False
+        congested_transit_assignment = self.config.congested_transit_assignment
+        if self.controller.iteration >= 1:
+            use_ccr = self.config.use_ccr
+        with self.controller.emme_manager.logbook_trace(
+            "First and total wait time, number of boardings, "
+            "fares, and total and transfer walk time"
+        ):
+            self.skim_walk_wait_boards_fares(time_period, transit_class)
+        with self.controller.emme_manager.logbook_trace("In-vehicle time by mode"):
+            self.skim_invehicle_time_by_mode(time_period, transit_class, use_ccr)
+        # with self.controller.emme_manager.logbook_trace(
+        #     "Transfer boarding time penalty",
+        #     "Drive toll"
+        # ):
+        #     self.skim_penalty_toll(time_period, transit_class)
+        with self.controller.emme_manager.logbook_trace(
+            "Drive distance and time",
+            "Walk auxiliary time, walk access time and walk egress time"
+        ):
+            self.skim_drive_walk(time_period, transit_class)
+        with self.controller.emme_manager.logbook_trace("Calculate crowding"):
+            self.skim_crowding(time_period, transit_class)
+        if use_ccr:
+            with self.controller.emme_manager.logbook_trace("CCR related skims"):
+                self.skim_reliability_crowding_capacity(time_period, transit_class)
+        # if congested_transit_assignment:
+        #     self.mask_above_max_transfers(time_period, transit_class)  # TODO: need to test
+        #     self.mask_if_not_required_modes(time_period, transit_class)  # TODO: need to test
+
+    def skim_walk_wait_boards_fares(self, time_period: str, transit_class: str):
+        """Skim wait, walk, board, and fares for a given time period and transit assignment class.
+
+        Skim the first and total wait time, number of boardings, (transfers + 1)
+        fares, total walk time, total in-vehicle time.
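+
+        Derived values are then computed with matrix calculations:
+        XWAIT = max(WAIT - IWAIT, 0), and for PNR classes BOARDS is
+        reduced by one (apparently to drop the PNR dummy boarding).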
+        """
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        _network = self.networks[time_period]
+        _transit_mode_ids = [
+            m.id for m in _network.modes() if m.type in ["TRANSIT", "AUX_TRANSIT"]
+        ]
+        spec = {
+            "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+            "actual_first_waiting_times": f'mf"{_tp_tclass}_IWAIT"',
+            "actual_total_waiting_times": f'mf"{_tp_tclass}_WAIT"',
+            "by_mode_subset": {
+                "modes": _transit_mode_ids,
+                "avg_boardings": f'mf"{_tp_tclass}_BOARDS"',
+            },
+        }
+        if self.config.use_fares:
+            spec["by_mode_subset"].update(
+                {
+                    "actual_in_vehicle_costs": f'mf"{_tp_tclass}_IN_VEHICLE_COST"',
+                    "actual_total_boarding_costs": f'mf"{_tp_tclass}_FARE"',
+                }
+            )
+
+        self.controller.emme_manager.matrix_results(
+            spec,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+        self._calc_xfer_wait(time_period, transit_class.name)
+        self._calc_boardings(time_period, transit_class.name)
+        if self.config.use_fares:
+            self._calc_fares(time_period, transit_class.name)
+
+    def _calc_xfer_walk(self, time_period, transit_class_name):
+        """Calculate transfer walk time over walk modes and add to Emmebank."""
+        xfer_modes = [m.mode_id for m in self.config.modes if m.type == "WALK"]
+        tp_tclass = f"{time_period}_{transit_class_name}"
+        spec = {
+            "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+            "by_mode_subset": {
+                "modes": xfer_modes,
+                "actual_aux_transit_times": f'mf"{tp_tclass}_XFERWALK"',
+            },
+        }
+        self.controller.emme_manager.matrix_results(
+            spec,
+            class_name=transit_class_name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    def _calc_xfer_wait(self, time_period, transit_class_name):
+        """Calculate transfer wait from total wait time and initial wait time and add to Emmebank.
+
+        TODO convert this type of calculation to numpy
+        """
+        tp_tclass = f"{time_period}_{transit_class_name}"
+        spec = {
+            "type": "MATRIX_CALCULATION",
+            "constraint": {
+                "by_value": {
+                    "od_values": f'mf"{tp_tclass}_WAIT"',
+                    "interval_min": 0,
+                    "interval_max": 9999999,
+                    "condition": "INCLUDE",
+                }
+            },
+            "result": f'mf"{tp_tclass}_XWAIT"',
+            "expression": f'(mf"{tp_tclass}_WAIT" - mf"{tp_tclass}_IWAIT").max.0',
+        }
+
+        self.controller.emme_manager.matrix_calculator(
+            spec,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    def _calc_boardings(self, time_period: str, transit_class_name: str):
+        """Calculate # boardings from # of transfers and add to Emmebank.
+
+        TODO convert this type of calculation to numpy
+        """
+        _tp_tclass = f"{time_period}_{transit_class_name}"
+        if ("PNR_TRN_WLK" in _tp_tclass) or ("WLK_TRN_PNR" in _tp_tclass):
+            spec = {
+                "type": "MATRIX_CALCULATION",
+                "constraint": {
+                    "by_value": {
+                        "od_values": f'mf"{_tp_tclass}_BOARDS"',
+                        "interval_min": 0,
+                        "interval_max": 9999999,
+                        "condition": "INCLUDE",
+                    }
+                },
+                # CHECK should this be BOARDS or similar, not xfers?
+                "result": f'mf"{_tp_tclass}_BOARDS"',
+                "expression": f'(mf"{_tp_tclass}_BOARDS" - 1).max.0',
+            }
+
+            self.controller.emme_manager.matrix_calculator(
+                spec,
+                scenario=self.scenarios[time_period],
+                num_processors=self.controller.num_processors,
+            )
+
+    def _calc_fares(self, time_period: str, transit_class_name: str):
+        """Calculate fares as the sum of in-vehicle cost and boarding cost to get the fare paid, and add to Emmebank.
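+
+        That is, per O-D pair: FARE = FARE (total boarding costs) + IN_VEHICLE_COST.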
+
+        TODO convert this type of calculation to numpy
+        """
+        _tp_tclass = f"{time_period}_{transit_class_name}"
+        spec = {
+            "type": "MATRIX_CALCULATION",
+            "constraint": None,
+            "result": f'mf"{_tp_tclass}_FARE"',
+            "expression": f'(mf"{_tp_tclass}_FARE" + mf"{_tp_tclass}_IN_VEHICLE_COST")',
+        }
+
+        self.controller.emme_manager.matrix_calculator(
+            spec,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    @staticmethod
+    def _segments_with_modes(_network, _modes: Union[Collection[str], str]):
+        """Return segments of transit lines whose mode ID is in _modes."""
+        _modes = list(_modes)
+        segments = [
+            li.segments() for li in _network.transit_lines() if li.mode.id in _modes
+        ]
+        return segments
+
+    def _invehicle_time_by_mode_ccr(
+        self, time_period: str, transit_class: str, mode_combinations
+    ) -> List[str]:
+        """Calculate in-vehicle travel time by mode using CCR and store results in Emmebank.
+
+        Args:
+            time_period (str): time period abbreviation
+            transit_class (str): transit class name
+            mode_combinations: list of (mode name, Emme mode IDs) tuples, see _get_emme_mode_ids
+
+        Returns:
+            List of matrix names in Emmebank to sum together to get total in-vehicle travel time.
+        """
+
+        _network = self.networks[time_period]
+        _scenario = self.scenarios[time_period]
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        _total_ivtt_expr = []
+        create_temps = self.controller.emme_manager.temp_attributes_and_restore
+        temp_attrs = [["TRANSIT_SEGMENT", "@mode_timtr", "base time by mode"]]
+        with create_temps(_scenario, temp_attrs):
+            for _mode_name, _modes in mode_combinations:
+                _network.create_attribute("TRANSIT_SEGMENT", "@mode_timtr")
+                _li_segs_with_mode = TransitSkim._segments_with_modes(_network, _modes)
+                # set temp attribute @mode_timtr to contain the non-congested in-vehicle
+                # times for segments of the mode of interest
+                for line_segment in _li_segs_with_mode:
+                    for segment in line_segment:
+                        segment["@mode_timtr"] = segment["@base_timtr"]
+                # not sure why we need to copy this if we are deleting it in the next line? - ES
+                self.controller.emme_manager.copy_attribute_values(
+                    self.networks[time_period],
+                    _scenario,
+                    {"TRANSIT_SEGMENT": ["@mode_timtr"]},
+                )
+                self.networks[time_period].delete_attribute(
+                    "TRANSIT_SEGMENT", "@mode_timtr"
+                )
+                _ivtt_matrix_name = f'mf"{_tp_tclass}_IVT{_mode_name}"'
+                _total_ivtt_expr.append(_ivtt_matrix_name)
+                self._run_strategy_analysis(
+                    time_period,
+                    transit_class,
+                    {"in_vehicle": "@mode_timtr"},
+                    f"IVT{_mode_name}",
+                )
+        return _total_ivtt_expr
+
+    def _invehicle_time_by_mode_no_ccr(
+        self, time_period: str, transit_class: str, mode_combinations
+    ) -> List[str]:
+        """Calculate in-vehicle travel time by mode without CCR and store results in Emmebank.
+
+        Args:
+            time_period (str): time period abbreviation
+            transit_class (str): transit class name
+            mode_combinations: list of (mode name, Emme mode IDs) tuples, see _get_emme_mode_ids
+
+        Returns: List of matrix names in Emmebank to sum together to get total in-vehicle travel time.
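+            For example (illustrative names only): mode "LOC" for class
+            WLK_TRN_WLK in period AM yields mf"AM_WLK_TRN_WLK_IVTLOC".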
+ + """ + _tp_tclass = f"{time_period}_{transit_class.name}" + _total_ivtt_expr = [] + for _mode_name, modes in mode_combinations: + _ivtt_matrix_name = f'mf"{_tp_tclass}_IVT{_mode_name}"' + _total_ivtt_expr.append(_ivtt_matrix_name) + spec = { + "type": "EXTENDED_TRANSIT_MATRIX_RESULTS", + "by_mode_subset": { + "modes": modes, + "actual_in_vehicle_times": _ivtt_matrix_name, + }, + } + self.controller.emme_manager.matrix_results( + spec, + class_name=transit_class.name, + scenario=self.scenarios[time_period], + num_processors=self.controller.num_processors, + ) + return _total_ivtt_expr + + def skim_invehicle_time_by_mode( + self, time_period: str, transit_class: str, use_ccr: bool = False + ) -> None: + """Skim in-vehicle by mode for a time period and transit class and store results in Emmebank. + + Args: + time_period (str): time period abbreviation + transit_class (str): transit class name + use_ccr (bool): if True, will use crowding, capacity, and reliability (ccr). + Defaults to False + + """ + mode_combinations = self._get_emme_mode_ids(transit_class, time_period) + if use_ccr: + total_ivtt_expr = self._invehicle_time_by_mode_ccr( + time_period, transit_class, mode_combinations + ) + else: + total_ivtt_expr = self._invehicle_time_by_mode_no_ccr( + time_period, transit_class, mode_combinations + ) + # sum total ivtt across all modes + self._calc_total_ivt(time_period, transit_class, total_ivtt_expr) + + def _calc_total_ivt( + self, time_period: str, transit_class: str, total_ivtt_expr: list[str] + ) -> None: + """Sums matrices to get total in vehicle time and stores in the Emmebank. + + Args: + time_period (str): time period abbreviation + transit_class (str): transit class name + total_ivtt_expr (list[str]): List of matrix names in Emmebank which have IVT to sum to get total. 
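+                The expression is simply these names joined with "+", e.g.
+                mf"AM_WLK_TRN_WLK_IVTLOC" + mf"AM_WLK_TRN_WLK_IVTEXP" (illustrative).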
+        """
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        spec = {
+            "type": "MATRIX_CALCULATION",
+            "constraint": None,
+            "result": f'mf"{_tp_tclass}_IVT"',
+            "expression": "+".join(total_ivtt_expr),
+        }
+
+        self.controller.emme_manager.matrix_calculator(
+            spec,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    def skim_drive_walk(
+        self, time_period: str, transit_class: str
+    ) -> None:
+        """Skim drive access/egress time and distance, and the walk time components.
+
+        Results are stored in the Emmebank for the given time period and transit class.
+        """
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        # _network = self.networks[time_period]
+
+        # drive time here is perception factor*(drive time + toll penalty),
+        # will calculate the actual drive time and subtract the toll penalty in the following steps
+        spec1 = {
+            "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+            "by_mode_subset": {
+                "modes": ["D"],
+                "actual_aux_transit_times": f'mf"{_tp_tclass}_DTIME"',
+                "distance": f'mf"{_tp_tclass}_DDIST"',
+            },
+        }
+        # skim walk distance into the walk time matrices first,
+        # will calculate the actual walk time and overwrite the distance in the following steps
+        spec2 = {
+            "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+            "by_mode_subset": {
+                "modes": ["w"],
+                "distance": f'mf"{_tp_tclass}_WAUX"',
+            },
+        }
+        spec3 = {
+            "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+            "by_mode_subset": {
+                "modes": ["a"],
+                "distance": f'mf"{_tp_tclass}_WACC"',
+            },
+        }
+        spec4 = {
+            "type": "EXTENDED_TRANSIT_MATRIX_RESULTS",
+            "by_mode_subset": {
+                "modes": ["e"],
+                "distance": f'mf"{_tp_tclass}_WEGR"',
+            },
+        }
+
+        self.controller.emme_manager.matrix_results(
+            spec1,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+        self.controller.emme_manager.matrix_results(
+            spec2,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+        self.controller.emme_manager.matrix_results(
+            spec3,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+        self.controller.emme_manager.matrix_results(
+            spec4,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+        drive_perception_factor = self.config.drive_perception_factor
+        walk_speed = self.config.walk_speed
+        vot = self.config.value_of_time
+        # divide drive time by the mode specific perception factor to get the actual time;
+        # for walk time, use walk distance / walk speed
+        # because the mode specific perception factors are hardcoded in the mode definition
+        spec_list = [
+            {
+                "type": "MATRIX_CALCULATION",
+                "constraint": None,
+                "result": f'mf"{_tp_tclass}_DTIME"',
+                "expression": f'mf"{_tp_tclass}_DTIME"/{drive_perception_factor}',
+            },
+            {
+                "type": "MATRIX_CALCULATION",
+                "constraint": None,
+                "result": f'mf"{_tp_tclass}_DTIME"',
+                "expression": f'mf"{_tp_tclass}_DTIME" - 60*mf"{_tp_tclass}_DTOLL"/{vot}',
+            },
+            {
+                "type": "MATRIX_CALCULATION",
+                "constraint": None,
+                "result": f'mf"{_tp_tclass}_WAUX"',
+                "expression": f'mf"{_tp_tclass}_WAUX"/({walk_speed}/60)',
+            },
+            {
+                "type": "MATRIX_CALCULATION",
+                "constraint": None,
+                "result": f'mf"{_tp_tclass}_WACC"',
+                "expression": f'mf"{_tp_tclass}_WACC"/({walk_speed}/60)',
+            },
+            {
+                "type": "MATRIX_CALCULATION",
+                "constraint": None,
+                "result": f'mf"{_tp_tclass}_WEGR"',
+                "expression": f'mf"{_tp_tclass}_WEGR"/({walk_speed}/60)',
+            },
+        ]
+        self.controller.emme_manager.matrix_calculator(
+            spec_list,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    def skim_penalty_toll(
+        self, time_period: str, transit_class: str
+    ) -> None:
+        """Skim the transfer boarding time penalty and drive access/egress tolls.
+
+        Results are stored in the Emmebank for the given time period and transit class.
+        """
+        # transfer boarding time penalty
+        self._run_strategy_analysis(
+            time_period, transit_class, {"boarding": "@xboard_nodepen"}, "XBOATIME"
+        )
+
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        if ("PNR_TRN_WLK" in _tp_tclass) or ("WLK_TRN_PNR" in _tp_tclass):
+            spec = {  # subtract PNR boarding from total transfer boarding time penalty
+                "type": "MATRIX_CALCULATION",
+                "constraint": {
+                    "by_value": {
+                        "od_values": f'mf"{_tp_tclass}_XBOATIME"',
+                        "interval_min": 0,
+                        "interval_max": 9999999,
+                        "condition": "INCLUDE",
+                    }
+                },
+                "result": f'mf"{_tp_tclass}_XBOATIME"',
+                "expression": f'(mf"{_tp_tclass}_XBOATIME" - 1).max.0',
+            }
+
+            self.controller.emme_manager.matrix_calculator(
+                spec,
+                scenario=self.scenarios[time_period],
+                num_processors=self.controller.num_processors,
+            )
+
+        # drive toll
+        if ("PNR_TRN_WLK" in _tp_tclass) or ("KNR_TRN_WLK" in _tp_tclass):
+            self._run_path_analysis(
+                time_period, transit_class, "ORIGIN_TO_INITIAL_BOARDING",
+                {"aux_transit": "@drive_toll"}, "DTOLL"
+            )
+        elif ("WLK_TRN_PNR" in _tp_tclass) or ("WLK_TRN_KNR" in _tp_tclass):
+            self._run_path_analysis(
+                time_period, transit_class, "FINAL_ALIGHTING_TO_DESTINATION",
+                {"aux_transit": "@drive_toll"}, "DTOLL"
+            )
+
+    def _get_emme_mode_ids(
+        self, transit_class, time_period
+    ) -> List[Tuple[str, List[str]]]:
+        """Get the Emme mode IDs used in the assignment.
+
+        Loads the #src_mode attribute on lines if fares are used, and the
+        @base_timtr on segments if ccr is used.
+
+        Returns:
+            List of tuples of two items, mapping the original mode name (from config)
+            to a list of mode IDs used in the Emme assignment. The list
+            will be one item if fares are not used, but will contain the fare
+            modes used in the journey levels mode-to-mode transfer table
+            generated from Apply fares.
+        """
+        if self.config.use_fares:
+            self.controller.emme_manager.copy_attribute_values(
+                self.scenarios[time_period],
+                self.networks[time_period],
+                {"TRANSIT_LINE": ["#src_mode"]},
+            )
+        if self.config.use_ccr:
+            self.controller.emme_manager.copy_attribute_values(
+                self.scenarios[time_period],
+                self.networks[time_period],
+                {"TRANSIT_SEGMENT": ["@base_timtr"]},
+            )
+        valid_modes = [
+            mode
+            for mode in self.config.modes
+            if mode.type in transit_class.mode_types
+            and mode.assign_type == "TRANSIT"
+            and mode.type != "PNR_dummy"
+        ]
+        if self.config.use_fares:
+            # map to used modes in apply fares case
+            fare_modes = defaultdict(set)
+            for line in self.networks[time_period].transit_lines():
+                fare_modes[line["#src_mode"]].add(line.mode.id)
+            emme_mode_ids = [
+                (mode.name, list(fare_modes[mode.mode_id]))
+                for mode in valid_modes
+                if len(fare_modes[mode.mode_id]) > 0
+            ]
+        else:
+            emme_mode_ids = [(mode.name, [mode.mode_id]) for mode in valid_modes]
+        return emme_mode_ids
+
+    def skim_reliability_crowding_capacity(
+        self, time_period: str, transit_class
+    ) -> None:
+        """Generate skim results for CCR assignment and store results in Emmebank.
+
+        Generates the following:
+        1. Link Unreliability: LINKREL
+        2. Crowding penalty: CROWD
+        3. Extra added wait time: EAWT
+        4. Capacity penalty: CAPPEN
+
+        Args:
+            time_period (str): time period abbreviation
+            transit_class: transit class config entry
+        """
+
+        # Link unreliability
+        self._run_strategy_analysis(
+            time_period, transit_class, {"in_vehicle": "ul1"}, "LINKREL"
+        )
+        # Crowding penalty
+        self._run_strategy_analysis(
+            time_period, transit_class, {"in_vehicle": "@ccost"}, "CROWD"
+        )
+        # skim node reliability, extra added wait time (EAWT)
+        self._run_strategy_analysis(
+            time_period, transit_class, {"boarding": "@eawt"}, "EAWT"
+        )
+        # skim capacity penalty
+        self._run_strategy_analysis(
+            time_period, transit_class, {"boarding": "@capacity_penalty"}, "CAPPEN"
+        )
+
+    def skim_crowding(
+        self, time_period: str, transit_class
+    ) -> None:
+        """Skim the crowding penalty and store results in Emmebank."""
+        # Crowding penalty
+        self._run_strategy_analysis(
+            time_period, transit_class, {"in_vehicle": "@ccost"}, "CROWD"
+        )
+
+    def _run_strategy_analysis(
+        self,
+        time_period: str,
+        transit_class,
+        components: Dict[str, str],
+        matrix_name_suffix: str,
+    ):
+        """Runs strategy analysis in Emme and stores results in Emmebank.
+
+        Args:
+            time_period (str): Time period name abbreviation
+            transit_class: transit class config entry
+            components (Dict[str, str]): maps a trip component (e.g. "in_vehicle",
+                "boarding") to the network attribute to sum over the strategies
+            matrix_name_suffix (str): Appended to time period and transit class name to create output matrix name.
+        """
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        _matrix_name = f'mf"{_tp_tclass}_{matrix_name_suffix}"'
+        strategy_analysis = self.controller.emme_manager.tool(
+            "inro.emme.transit_assignment.extended.strategy_based_analysis"
+        )
+
+        spec = {
+            "trip_components": components,
+            "sub_path_combination_operator": "+",
+            "sub_strategy_combination_operator": "average",
+            "selected_demand_and_transit_volumes": {
+                "sub_strategies_to_retain": "ALL",
+                "selection_threshold": {"lower": -999999, "upper": 999999},
+            },
+            "analyzed_demand": f"mfTRN_{transit_class.name}_{time_period}",
+            "constraint": None,
+            "results": {"strategy_values": _matrix_name},
+            "type": "EXTENDED_TRANSIT_STRATEGY_ANALYSIS",
+        }
+        strategy_analysis(
+            spec,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    def _run_path_analysis(
+        self,
+        time_period: str,
+        transit_class,
+        portion_of_path: str,
+        components: Dict[str, str],
+        matrix_name_suffix: str,
+    ):
+        """Runs path analysis in Emme and stores results in Emmebank.
+
+        Args:
+            time_period (str): Time period name abbreviation
+            transit_class: transit class config entry
+            portion_of_path (str): part of the path to analyze, e.g.
+                "ORIGIN_TO_INITIAL_BOARDING" or "FINAL_ALIGHTING_TO_DESTINATION"
+            components (Dict[str, str]): maps a trip component (e.g. "aux_transit")
+                to the network attribute to sum along the path
+            matrix_name_suffix (str): Appended to time period and transit class name to create output matrix name.
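+
+        For example, components={"aux_transit": "@drive_toll"} with
+        portion_of_path="ORIGIN_TO_INITIAL_BOARDING" and suffix "DTOLL"
+        averages the drive-access toll over paths into
+        mf"{time_period}_{transit_class}_DTOLL" (see skim_penalty_toll above).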
+        """
+        _tp_tclass = f"{time_period}_{transit_class.name}"
+        _matrix_name = f'mf"{_tp_tclass}_{matrix_name_suffix}"'
+        path_analysis = self.controller.emme_manager.tool(
+            "inro.emme.transit_assignment.extended.path_based_analysis"
+        )
+
+        spec = {
+            "portion_of_path": portion_of_path,
+            "trip_components": components,
+            "path_operator": "+",
+            "path_selection_threshold": {
+                "lower": -999999,
+                "upper": 999999
+            },
+            "path_to_od_aggregation": {
+                "operator": "average",
+                "aggregated_path_values": _matrix_name
+            },
+            "analyzed_demand": None,
+            "constraint": None,
+            "type": "EXTENDED_TRANSIT_PATH_ANALYSIS",
+        }
+        path_analysis(
+            spec,
+            class_name=transit_class.name,
+            scenario=self.scenarios[time_period],
+            num_processors=self.controller.num_processors,
+        )
+
+    def mask_if_not_required_modes(self, time_period: str, transit_class) -> None:
+        """Enforce the `required_mode_combo` parameter by zeroing skims where required modes are missing.
+
+        Args:
+            time_period (str): Time period name abbreviation
+            transit_class: transit class config entry
+        """
+        if not transit_class.required_mode_combo:
+            return
+
+        _ivt_skims = {}
+        for mode in transit_class.required_mode_combo:
+            transit_modes = [m for m in self.config.modes if m.type == mode]
+            for transit_mode in transit_modes:
+                # note: matrix names use the IVT{mode} suffix created in skim_properties
+                if mode not in _ivt_skims:
+                    _ivt_skims[mode] = self.matrix_cache[time_period].get_data(
+                        f'mf"{time_period}_{transit_class.name}_IVT{transit_mode.name}"'
+                    )
+                else:
+                    _ivt_skims[mode] += self.matrix_cache[time_period].get_data(
+                        f'mf"{time_period}_{transit_class.name}_IVT{transit_mode.name}"'
+                    )
+
+        # multiply all IVT skims together and see if they are greater than zero
+        has_all = None
+        for key, value in _ivt_skims.items():
+            if has_all is not None:
+                has_all = np.multiply(has_all, value)
+            else:
+                has_all = value
+
+        self._mask_skim_set(time_period, transit_class, has_all)
+
+    def mask_above_max_transfers(self, time_period: str, transit_class):
+        """Reset skims to 0 if the number of transfers is greater than max_transfers.
+
+        Args:
+            time_period (str): Time period name abbreviation
+            transit_class: transit class config entry
+        """
+        max_transfers = self.config.max_transfers
+        # NOTE: assumes an XFERS (number of transfers) matrix is available
+        xfers = self.matrix_cache[time_period].get_data(
+            f'mf"{time_period}_{transit_class.name}_XFERS"'
+        )
+        xfer_mask = np.less_equal(xfers, max_transfers)
+        self._mask_skim_set(time_period, transit_class, xfer_mask)
+
+    def _mask_skim_set(self, time_period: str, transit_class, mask_array: NumpyArray):
+        """Mask a skim set (set of skims for a given time period and transit class) based on an array.
+
+        O-D cells where the mask is greater than zero are kept; all other cells are zeroed out.
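+
+        For example, mask_if_not_required_modes passes the elementwise product
+        of the required modes' IVT skims, so any O-D pair missing one of the
+        required modes gets all of its skims zeroed out.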
+
+        TODO add in checks for mask_array dimensions and values
+
+        Args:
+            time_period (str): Time period name abbreviation
+            transit_class: transit class config entry
+            mask_array (NumpyArray): zone-by-zone array; cells <= 0 are masked out
+        """
+        # keep cells with positive, finite mask values
+        mask_array = np.logical_and(np.greater(mask_array, 0), np.less(mask_array, inf))
+        for skim_key, skim in self.emmebank_skim_matrices(
+            time_periods=[time_period], transit_classes=[transit_class]
+        ).items():
+            skim_data = self.matrix_cache[time_period].get_data(skim.name)
+            self.matrix_cache[time_period].set_data(skim.name, skim_data * mask_array)
+
+    def _export_skims(self, time_period: str, transit_class: str):
+        """Export skims to OMX files by period."""
+        # NOTE: skims in separate file by period
+        output_skim_path = self.get_abs_path(
+            self.config.output_skim_path
+        )
+        omx_file_path = os.path.join(
+            output_skim_path,
+            self.config.output_skim_filename_tmpl.format(
+                time_period=time_period, tclass=transit_class.name
+            ),
+        )
+        os.makedirs(os.path.dirname(omx_file_path), exist_ok=True)
+
+        _matrices = self.emmebank_skim_matrices(
+            time_periods=[time_period], transit_classes=[transit_class]
+        )
+        # matrices = {}
+        # matrices_growth = {}  # matrices need to be multiplied by 100
+
+        # for skim in _matrices.keys():
+        #     if ("BOARDS" in skim) or ("XBOATIME" in skim):
+        #         matrices[skim] = _matrices[skim]
+        #     else:
+        #         matrices_growth[skim] = _matrices[skim]
+
+        with OMXManager(
+            omx_file_path,
+            "w",
+            self.scenarios[time_period],
+            matrix_cache=self.matrix_cache[time_period],
+            mask_max_value=1e7,
+            growth_factor=1
+        ) as omx_file:
+            omx_file.write_matrices(_matrices)
+
+        # with OMXManager(
+        #     omx_file_path,
+        #     "a",
+        #     self.scenarios[time_period],
+        #     matrix_cache=self.matrix_cache[time_period],
+        #     mask_max_value=1e7,
+        #     growth_factor=100
+        # ) as omx_file:
+        #     omx_file.write_matrices(matrices_growth)
+
+    def _log_debug_report(self, _time_period):
+        num_zones = len(self.scenarios[_time_period].zone_numbers)
+        num_cells = num_zones * num_zones
+        self.logger.log(
+            f"Transit impedance summary for period {_time_period}", level="DEBUG"
+        )
+        self.logger.log(
+            f"Number of zones: {num_zones}. Number of O-D pairs: {num_cells}. "
" + "Values outside -9999999, 9999999 are masked in summaries.", + level="DEBUG", + ) + self.logger.log( + "name min max mean sum", + level="DEBUG", + ) + + temp = self.emmebank_skim_matrices(time_periods=[_time_period]) + + for matrix_name in temp.keys(): + matrix_name = f'mf"{matrix_name}"' + values = self.matrix_cache[_time_period].get_data(matrix_name) + data = np.ma.masked_outside(values, -9999999, 9999999) + stats = ( + f"{matrix_name:25} {data.min():9.4g} {data.max():9.4g} " + f"{data.mean():9.4g} {data.sum(): 13.7g}" + ) + self.logger.log(stats, level="DEBUG") + + @staticmethod + def _copy_attribute_values(src, dst, attributes): + for domain, attrs in attributes.items(): + values = src.get_attribute_values(domain, attrs) + dst.set_attribute_values(domain, attrs, values) diff --git a/tm2py/components/time_of_day.py b/tm2py/components/time_of_day.py new file mode 100644 index 00000000..806d5a09 --- /dev/null +++ b/tm2py/components/time_of_day.py @@ -0,0 +1,81 @@ +"""Module with helpful matrix helper functions.""" + +from typing import Collection, Dict, Mapping, Optional, Union +from unicodedata import decimal + +import numpy as np +import pandas as pd + +from tm2py.components.component import Component, Subcomponent +from tm2py.config import TimeSplitConfig +from tm2py.logger import LogStartEnd + +NumpyArray = np.array + + +class TimePeriodSplit(Subcomponent): + def __init__( + self, + controller, + component: Component, + split_configs: Collection[TimeSplitConfig], + ): + + super().__init__(controller, component) + self.split_configs = split_configs + + def validate_inputs(self): + # TODO + pass + + @staticmethod + def split_matrix(matrix, split_config: TimeSplitConfig): + if isinstance(matrix, dict): + _split_demand = {} + for key, value in matrix.items(): + if split_config.production and split_config.attraction: + prod, attract = ( + 0.5 * split_config.production, + 0.5 * split_config.attraction, + ) + _split_demand[key] = prod * value + attract * value.T + else: + _split_demand[key] = split_config.od * value + + _split_demand[key] = np.around(_split_demand[key], decimals=2) + else: + if split_config.production and split_config.attraction: + prod, attract = ( + 0.5 * split_config.production, + 0.5 * split_config.attraction, + ) + _split_demand = prod * matrix + attract * matrix.T + else: + _split_demand = split_config.od * matrix + + _split_demand = np.around(_split_demand, decimals=2) + + return _split_demand + + @LogStartEnd() + def run(self, demand: NumpyArray) -> Dict[str, NumpyArray]: + """Split a demand matrix according to a TimeOfDaySplitConfig. + + Right now supports simple factoring of demand. If TimeOfDaySplitConfig has productions + and attractions, will balance the matrix to product an OD matrix. If has origins and + destinations, wont balance. + + Args: + matrix (NumpyArray): matrix to split. + split_configs (Collection[TimeOfDaySplitConfig]): List of TimeOfDaySplitConfigs to use. + + Returns: + Dict[str, NumpyArray]: _description_ + """ + matrix_dict = {} + for _split in self.split_configs: + matrix_dict[_split.time_period] = TimePeriodSplit.split_matrix( + demand, _split + ) + + return matrix_dict diff --git a/tm2py/config.py b/tm2py/config.py index ab0d1d14..3970c552 100644 --- a/tm2py/config.py +++ b/tm2py/config.py @@ -1,14 +1,16 @@ -"""Config implementation and schema. 
-""" +"""Config implementation and schema.""" # pylint: disable=too-many-instance-attributes +import datetime +import pathlib from abc import ABC -from typing import List, Tuple, Union, Optional -from typing_extensions import Literal +from typing import Dict, List, Optional, Tuple, Union -from pydantic import Field, validator -from pydantic.dataclasses import dataclass import toml +from pydantic import Field, NonNegativeFloat, validator +from pydantic.dataclasses import dataclass +from pydantic.error_wrappers import ValidationError +from typing_extensions import Literal class ConfigItem(ABC): @@ -22,10 +24,11 @@ class ConfigItem(ABC): """ def __getitem__(self, key): + """Get item for config. D[key] -> D[key] if key in D, else raise KeyError.""" return getattr(self, key) def items(self): - """D.items() -> a set-like object providing a view on D's items""" + """The sub-config objects in config.""" return self.__dict__.items() def get(self, key, default=None): @@ -35,7 +38,7 @@ def get(self, key, default=None): @dataclass(frozen=True) class ScenarioConfig(ConfigItem): - """Scenario related parameters + """Scenario related parameters. Properties: verify: optional, default False if specified as True components will run @@ -43,10 +46,16 @@ class ScenarioConfig(ConfigItem): (not implemented yet) maz_landuse_file: relative path to maz_landuse_file used by multiple components + name: scenario name string year: model year, must be at least 2005 + landuse_file: TAZ file """ - maz_landuse_file: str + maz_landuse_file: pathlib.Path + zone_seq_file: pathlib.Path + landuse_file: pathlib.Path + landuse_index_column: str + name: str year: int = Field(ge=2005) verify: Optional[bool] = Field(default=False) @@ -59,7 +68,10 @@ class ScenarioConfig(ConfigItem): "highway_maz_assign", "highway", "highway_maz_skim", - "transit", + "drive_access_skims", + "prepare_network_transit", + "transit_assign", + "transit_skim", "household", "visitor", "internal_external", @@ -67,19 +79,45 @@ class ScenarioConfig(ConfigItem): ] EmptyString = Literal[""] +@dataclass(frozen=True) +class WarmStartConfig(ConfigItem): + """Warm start parameters. + + Note that the components will be executed in the order listed. + + Properties: + warmstart: Boolean indicating whether warmstart demand matrices are used. + warmstart_check: if on, check that demand matrix files exist. + household_highway_demand_file: file name template of warmstart household highway demand matrices. + household_transit_demand_file: file name template of warmstart household transit demand matrices. + air_passenger_highway_demand_file: file name template of warmstart airport highway demand matrices. + internal_external_highway_demand_file: file name template of warmstart internal-external highway demand matrices. + """ + + warmstart: Optional[bool] = Field(default=False) + warmstart_check: Optional[bool] = Field(default=False) + household_highway_demand_file: Optional[str] = Field(default="") + household_transit_demand_file: Optional[str] = Field(default="") + air_passenger_highway_demand_file: Optional[str] = Field(default="") + internal_external_highway_demand_file: Optional[str] = Field(default="") + truck_highway_demand_file: Optional[str] = Field(default="") @dataclass(frozen=True) class RunConfig(ConfigItem): - """Model run parameters + """Model run parameters. + + Note that the components will be executed in the order listed. 
Properties:
         start_iteration: start iteration number, 0 to include initial_components
         end_iteration: final iteration number
         start_component: name of component to start with, will skip components
             list prior to this component
-        initial_components: list of components to run as initial (0) iteration
-        global_iteration_components: list of component to run at every iteration, in order
+        initial_components: list of components to run as initial (0) iteration, in order
+        global_iteration_components: list of components to run at every subsequent
+            iteration (max(1, start_iteration) to end_iteration), in order.
         final_components: list of components to run after final iteration, in order
+        warmstart: warmstart configuration, including file locations.
     """
 
     initial_components: Tuple[ComponentNames, ...]
@@ -87,88 +125,531 @@
     final_components: Tuple[ComponentNames, ...]
     start_iteration: int = Field(ge=0)
     end_iteration: int = Field(gt=0)
+    warmstart: WarmStartConfig = WarmStartConfig()
     start_component: Optional[Union[ComponentNames, EmptyString]] = Field(default="")
 
-    @classmethod
-    @validator("end_iteration")
+    @validator("end_iteration", allow_reuse=True)
     def end_iteration_gt_start(cls, value, values):
-        """Validate end_iteration greater than start_iteration"""
-        if "start_iteration" in values:
+        """Validate end_iteration greater than or equal to start_iteration."""
+        if values.get("start_iteration") is not None:
             assert (
-                value > values["start_iteration"]
-            ), "must be greater than start_iteration"
+                value >= values["start_iteration"]
+            ), f"'end_iteration' ({value}) must be greater than or equal to\
+                'start_iteration' ({values['start_iteration']})"
         return value
 
+    @validator("start_component", allow_reuse=True)
+    def start_component_used(cls, value, values):
+        """Validate start_component is listed in *_components."""
+        if not value:
+            return value
+
+        if "start_iteration" in values:
+            if values.get("start_iteration") == 0:
+                assert value in values.get(
+                    "initial_components", [value]
+                ), f"'start_component' ({value}) must be one of the components listed in\
+                    initial_components if 'start_iteration = 0'"
+            else:
+                assert value in values.get(
+                    "global_iteration_components", [value]
+                ), f"'start_component' ({value}) must be one of the components listed in\
+                    global_iteration_components if 'start_iteration > 0'"
+        return value
+
+
+LogLevel = Literal[
+    "TRACE", "DEBUG", "DETAIL", "INFO", "STATUS", "WARN", "ERROR", "FATAL"
+]
+
+
+@dataclass(frozen=True)
+class LoggingConfig(ConfigItem):
+    """Logging parameters. TODO.
+
+    Properties:
+        display_level: filter level for messages to show in console, default
+            is STATUS
+        run_file_path: relative path to high-level log file for the model run,
+            default is tm2py_run_[%Y%m%d_%H%M].log
+        run_file_level: filter level for messages recorded in the run log,
+            default is INFO
+        log_file_path: relative path to general log file with more detail
+            than the run_file, default is tm2py_detail_[%Y%m%d_%H%M].log
+        log_file_level: optional, filter level for messages recorded in the
+            standard log, default is DETAIL
+        log_on_error_file_path: relative path to use for fallback log message cache
+            on error, default is tm2py_error_[%Y%m%d_%H%M].log
+        notify_slack: if true notify_slack messages will be sent, default is False
+        use_emme_logbook: if True log messages recorded in the standard log file will
+            also be recorded in the Emme logbook, default is True
+        iter_component_level: tuple of tuples of iteration, component name, log level.
+            Used to override log levels (log_file_level) for debugging and recording
+            more detail in the log_file_path.
+            Example: [ [2, "highway", "TRACE"] ] to record all messages
+            during the highway component run at iteration 2.
+    """
+
+    display_level: Optional[LogLevel] = Field(default="STATUS")
+    run_file_path: Optional[str] = Field(
+        default="tm2py_run_{}.log".format(
+            datetime.datetime.now().strftime("%Y%m%d_%H%M")
+        )
+    )
+    run_file_level: Optional[LogLevel] = Field(default="INFO")
+    log_file_path: Optional[str] = Field(
+        default="tm2py_debug_{}.log".format(
+            datetime.datetime.now().strftime("%Y%m%d_%H%M")
+        )
+    )
+    log_file_level: Optional[LogLevel] = Field(default="DEBUG")
+    log_on_error_file_path: Optional[str] = Field(
+        default="tm2py_error_{}.log".format(
+            datetime.datetime.now().strftime("%Y%m%d_%H%M")
+        )
+    )
+
+    notify_slack: Optional[bool] = Field(default=False)
+    use_emme_logbook: Optional[bool] = Field(default=True)
+    iter_component_level: Optional[
+        Tuple[Tuple[int, ComponentNames, LogLevel], ...]
+    ] = Field(default=None)
+
 
 @dataclass(frozen=True)
 class TimePeriodConfig(ConfigItem):
-    """Time time period entry"""
+    """Time period entry.
 
-    name: str
+    Properties:
+        name: name of the time period, up to four characters
+        length_hours: length of the time period in hours
+        highway_capacity_factor: factor used to multiply the per-hour
+            capacities in the highway network
+        emme_scenario_id: scenario ID to use for Emme per-period
+            assignment (highway and transit) scenarios
+        congested_transit_assn_max_iteration: max iterations in congested
+            transit assignment stopping criteria
+    """
+
+    name: str = Field(max_length=4)
+    start_period: float = Field(gt=0)
     length_hours: float = Field(gt=0)
     highway_capacity_factor: float = Field(gt=0)
     emme_scenario_id: int = Field(ge=1)
+    congested_transit_assn_max_iteration: int = Field(ge=1)
+    description: Optional[str] = Field(default="")
 
 
 @dataclass(frozen=True)
-class HouseholdConfig(ConfigItem):
-    """Household (residents) model parameters"""
+class TimeSplitConfig(ConfigItem):
+    """Split a matrix by production/attraction or OD factors,
 
-    highway_demand_file: str
-    transit_demand_file: str
+    e.g. for time-of-day splits.
+    """
+
+    time_period: str
+    production: Optional[NonNegativeFloat] = None
+    attraction: Optional[NonNegativeFloat] = None
+    od: Optional[NonNegativeFloat] = None
+
+    @validator("production", "attraction", "od")
+    def less_than_equal_one(cls, v):
+        """Validate that each factor is less than or equal to 1."""
+        if v:
+            assert v <= 1.0, "Value should be less than or equal to 1"
+        return v
+
+    def __post_init__(self):
+        if self.od and any([self.production, self.attraction]):
+            raise ValueError(
+                f"TimeSplitConfig: Must specify either od or\
+                production/attraction - not both.\n{self}"
+            )
+
+        if not all([self.production, self.attraction]) and any(
+            [self.production, self.attraction]
+        ):
+            raise ValueError(
+                "TimeSplitConfig: Must have both production AND attraction\
+                if one of them is specified."
+            )
+
+
+@dataclass(frozen=True)
+class TimeOfDayClassConfig(ConfigItem):
+    """Configuration for a class of time of day model."""
+
+    name: str
+    time_period_split: List[TimeSplitConfig]
+
+
+@dataclass(frozen=True)
+class TimeOfDayConfig(ConfigItem):
+    """Configuration for time of day model."""
+
+    classes: List[TimeOfDayClassConfig]
+
+
+@dataclass(frozen=True)
+class HouseholdModeAgg(ConfigItem):
+    """Household trip mode aggregation input parameters.
+
+    Properties:
+        name: aggregate name used for the class group in the input columns
+            for the trip tables,
+        modes: list of mode choice mode names used for the trip tables
+    """
+
+    name: str
+    modes: Tuple[str, ...]
+
+
+@dataclass(frozen=True)
+class HouseholdConfig(ConfigItem):
+    """Household (residents) model parameters."""
+
+    highway_demand_file: pathlib.Path
+    transit_demand_file: pathlib.Path
+    highway_taz_ctramp_output_file: pathlib.Path
+    mode_agg: List[HouseholdModeAgg]
+    highway_maz_ctramp_output_file: pathlib.Path
+    transit_tap_ctramp_output_file: pathlib.Path
+    transit_taz_ctramp_output_file: pathlib.Path
+    active_demand_file: pathlib.Path
+    OwnedAV_ZPV_factor: float
+    TNC_ZPV_factor: float
+    ctramp_indiv_trip_file: str
+    ctramp_joint_trip_file: str
+    ctramp_run_dir: pathlib.Path
+    rideshare_mode_split: Dict[str, float]
+    taxi_split: Dict[str, float]
+    single_tnc_split: Dict[str, float]
+    shared_tnc_split: Dict[str, float]
+    ctramp_mode_names: Dict[float, str]
+    income_segment: Dict[str, Union[float, str, list]]
+    ctramp_hh_file: str
 
 
 @dataclass(frozen=True)
 class AirPassengerDemandAggregationConfig(ConfigItem):
-    """Air passenger demand aggregation input parameters"""
+    """Air passenger demand aggregation input parameters.
 
-    result_class_name: str
-    src_group_name: str
+    Properties:
+        name: (src_group_name) name used for the class group in the input columns
+            for the trip tables,
+        mode: (result_class_name) name used in the output OMX matrix names, note
+            that this should match the expected naming convention in the
+            HighwayClassDemandConfig name(s)
+        access_modes: list of names used for the access modes in the input
+            columns for the trip tables
+    """
+
+    name: str
+    mode: str
     access_modes: Tuple[str, ...]
 
 
 @dataclass(frozen=True)
 class AirPassengerConfig(ConfigItem):
-    """Air passenger model parameters"""
+    """Air passenger model parameters.
+
+    Properties:
+
+    highway_demand_file: output OMX file
+    input_demand_folder: location to find the input demand csvs
+    input_demand_filename_tmpl: filename template for input demand. Should have
+        {year}, {direction} and {airport} variables and end in '.csv'
+    reference_start_year: base start year for input demand tables
+        used to calculate the linear interpolation, as well as
+        in the file name template {year}_{direction}{airport}.csv
+    reference_end_year: end year for input demand tables
+        used to calculate the linear interpolation, as well as
+        in the file name template {year}_{direction}{airport}.csv
+    airport_names: list of one or more airport names / codes as used in
+        the input file names
+    demand_aggregation: specification of aggregation of by-access mode
+        demand to highway class demand
+    """
+
+    output_trip_table_directory: pathlib.Path
+    outfile_trip_table_tmp: str
+    input_demand_folder: pathlib.Path
+    input_demand_filename_tmpl: str
    highway_demand_file: str
     reference_start_year: str
     reference_end_year: str
+    airport_names: List[str]
+    demand_aggregation: List[AirPassengerDemandAggregationConfig]
+
+    @validator("input_demand_filename_tmpl")
+    def valid_input_demand_filename_tmpl(cls, value):
+        """Validate the input demand filename template has the correct placeholders."""
+
+        assert (
+            "{year}" in value
+        ), f"-> 'input_demand_filename_tmpl' must have {{year}}, found {value}."
+        assert (
+            "{direction}" in value
+        ), f"-> 'input_demand_filename_tmpl' must have {{direction}}, found {value}."
+        assert (
+            "{airport}" in value
+        ), f"-> 'input_demand_filename_tmpl' must have {{airport}}, found {value}."
+        return value
 
 
 @dataclass(frozen=True)
-class InternalExternalConfig(ConfigItem):
-    """Internal <-> External model parameters"""
+class MatrixFactorConfig(ConfigItem):
+    """Mapping of zone or list of zones to factor value."""
+
+    zone_index: Optional[Union[int, List[int]]]
+    factor: Optional[float] = Field(default=None)
+    i_factor: Optional[float] = Field(default=None)
+    j_factor: Optional[float] = Field(default=None)
+    as_growth_rate: Optional[bool] = Field(default=False)
+
+    @validator("zone_index", allow_reuse=True)
+    def valid_zone_index(value):
+        """Validate zone index and turn it into a list if it isn't one."""
+        if isinstance(value, str):
+            value = int(value)
+        if isinstance(value, int):
+            value = [value]
+        assert all([x >= 0 for x in value]), "zone_index must be greater than or equal to 0"
+        return value
 
-    highway_demand_file: str
-    input_demand_file: str
-    reference_year: int
-    toll_choice_time_coefficient: float
+    @validator("factor", allow_reuse=True)
+    def valid_factor(value, values):
+        """Validate that factor is not used together with i_factor/j_factor."""
+        assert (
+            "i_factor" not in values.keys()
+        ), "Found both `factor` and\
+            `i_factor` in MatrixFactorConfig. Should be one or the other."
+
+        assert (
+            "j_factor" not in values.keys()
+        ), "Found both `factor` and\
+            `j_factor` in MatrixFactorConfig. Should be one or the other."
+        return value
+
+
+@dataclass(frozen=True)
+class CoefficientConfig(ConfigItem):
+    """Coefficient and properties to be used in utility or regression."""
+
+    property: str
+    coeff: Optional[float] = Field(default=None)
+
+
+@dataclass(frozen=True)
+class ChoiceClassConfig(ConfigItem):
+    """Choice class parameters.
+
+    Properties:
+        property_to_skim_toll: Maps a property in the utility equation with a list of skim
+            properties. If more than one skim property is listed, they will be summed together
+            (e.g. cost is the sum of bridge toll and value toll). This defaults to a value in the
+            code.
+        property_to_skim_notoll: Maps a property in the utility equation with a list of skim
+            properties for the no-toll choice. If more than one skim property is listed, they will
+            be summed together (e.g. cost is the sum of bridge toll and value toll). This
+            defaults to a value in the code.
+        property_factors: This will scale the property for this class. e.g. a shared ride cost
+            could have a factor applied assuming that the cost is shared among individuals.
+
+    The end value in the utility equation for class c and property p is:
+
+    utility[p].coeff *
+        classes[c].property_factor[p] *
+        sum(skim(classes[c].skim_mode,skim_p) for skim_p in property_to_skim[p])
+    """
+
+    name: str
+    skim_mode: Optional[str] = Field(default="da")
+    veh_group_name: Optional[str] = Field(default="")
+    property_factors: Optional[List[CoefficientConfig]] = Field(default=None)
+
+
+@dataclass(frozen=True)
+class TollChoiceConfig(ConfigItem):
+    """Toll choice parameters.
+
+    Properties:
+        property_to_skim_toll: Maps a property in the utility equation with a list of skim
+            properties. If more than one skim property is listed, they will be summed together
+            (e.g. cost is the sum of bridge toll and value toll). This defaults to a value in the
+            code.
+        property_to_skim_notoll: Maps a property in the utility equation with a list of skim
+            properties for the no-toll choice. If more than one skim property is listed, they will
+            be summed together (e.g. cost is the sum of bridge toll and value toll). This
+            defaults to a value in the code.
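+
+    For example (illustrative values): with a utility coefficient of -0.6 on
+    "cost", a class property_factor of 0.5 (e.g. a shared-ride class splitting
+    the cost), and "cost" mapped to ["bridgetoll_da", "valuetoll_da"], the
+    utility contribution is -0.6 * 0.5 * (bridgetoll_da + valuetoll_da).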
+    """
+
+    classes: List[ChoiceClassConfig]
+    value_of_time: float
+    operating_cost_per_mile: float
+    property_to_skim_toll: Optional[Dict[str, List[str]]] = Field(default_factory=dict)
+    property_to_skim_notoll: Optional[Dict[str, List[str]]] = Field(
+        default_factory=dict
+    )
+    utility: Optional[List[CoefficientConfig]] = Field(default_factory=list)
 
 
 @dataclass(frozen=True)
-class TruckConfig(ConfigItem):
-    """Truck model parameters"""
+class DemandGrowth(ConfigItem):
+    """Parameters for growing demand from a reference year by annual growth rates."""
+
+    input_demand_file: pathlib.Path
+    input_demand_matrixname_tmpl: str
+    reference_year: int
+    annual_growth_rate: List[MatrixFactorConfig]
+    special_gateway_adjust: Optional[List[MatrixFactorConfig]] = Field(
+        default_factory=list
+    )
+
+
+@dataclass(frozen=True)
+class InternalExternalConfig(ConfigItem):
+    """Internal <-> External model parameters."""
+
+    output_trip_table_directory: pathlib.Path
+    outfile_trip_table_tmp: str
     highway_demand_file: str
-    k_factors_file: str
-    friction_factors_file: str
-    value_of_time: float
-    operating_cost_per_mile: float
-    toll_choice_time_coefficient: float
+    modes: List[str]
+    demand: DemandGrowth
+    time_of_day: TimeOfDayConfig
+    toll_choice: TollChoiceConfig
+
+
+@dataclass(frozen=True)
+class TripGenerationFormulaConfig(ConfigItem):
+    """Trip production or attraction formula parameters.
+
+    Trip productions or attractions for a zone are the constant plus the sum of
+    the rates * values in the land use file for that zone, scaled by the multiplier.
+    """
+
+    land_use_rates: List[CoefficientConfig]
+    constant: Optional[float] = Field(default=0.0)
+    multiplier: Optional[float] = Field(default=1.0)
+
+
+@dataclass(frozen=True)
+class TripGenerationClassConfig(ConfigItem):
+    """Trip Generation parameters."""
+
+    name: str
+    purpose: Optional[str] = Field(default=None)
+    production_formula: Optional[TripGenerationFormulaConfig] = Field(default=None)
+    attraction_formula: Optional[TripGenerationFormulaConfig] = Field(default=None)
+    balance_to: Optional[str] = Field(default="production")
+
+
+@dataclass(frozen=True)
+class TripGenerationConfig(ConfigItem):
+    """Trip Generation parameters."""
+
+    classes: List[TripGenerationClassConfig]
+
+
+@dataclass(frozen=True)
+class TripDistributionClassConfig(ConfigItem):
+    """Trip Distribution parameters.
+
+    Properties:
+        name: name of class to apply to
+        impedance: name of the impedance (skim) matrix to use, often a blended skim
+        use_k_factors: whether to use k-factors
+    """
+
+    name: str
+    impedance: str
+    use_k_factors: bool
+
+
+@dataclass(frozen=True)
+class TruckClassConfig(ConfigItem):
+    """Truck class parameters."""
+
+    name: str
+    description: Optional[str] = ""
+
+
+@dataclass(frozen=True)
+class ImpedanceConfig(ConfigItem):
+    """Blended skims used for accessibility/friction calculations.
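+
+    For example (illustrative values), a blend of one-third AM and two-thirds
+    midday drive-alone skims: name = "da_blend", skim_mode = "da",
+    time_blend = { am = 0.33, md = 0.67 }.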
+
+    Properties:
+        name: name to store it as, referred to in TripDistribution config
+        skim_mode: name of the mode to use for the blended skim
+        time_blend: blend of time periods to use; mapped to the factors (which should sum to 1)
+    """
+
+    name: str
+    skim_mode: str
+    time_blend: Dict[str, float]
+
+    @validator("time_blend", allow_reuse=True)
+    def sums_to_one(value):
+        """Validate that the time_blend factors sum to 1."""
+        assert abs(sum(value.values()) - 1) < 0.0001, "blend factors must sum to 1"
+        return value
+
+
+@dataclass(frozen=True)
+class TripDistributionConfig(ConfigItem):
+    """Trip Distribution parameters."""
+
+    classes: List[TripDistributionClassConfig]
     max_balance_iterations: int
     max_balance_relative_error: float
+    friction_factors_file: pathlib.Path
+    k_factors_file: Optional[pathlib.Path] = None
+
+
+@dataclass(frozen=True)
+class TruckConfig(ConfigItem):
+    """Truck model parameters."""
+
+    classes: List[TruckClassConfig]
+    impedances: List[ImpedanceConfig]
+    trip_gen: TripGenerationConfig
+    trip_dist: TripDistributionConfig
+    time_of_day: TimeOfDayConfig
+    toll_choice: TollChoiceConfig
+    output_trip_table_directory: pathlib.Path
+    outfile_trip_table_tmp: str
+    highway_demand_file: str
+
+    """
+    @validator("classes")
+    def class_consistency(cls, v, values):
+        # TODO Can't get to work right now
+        _class_names = [c.name for c in v]
+        _gen_classes = [c.name for c in values["trip_gen"]]
+        _dist_classes = [c.name for c in values["trip_dist"]]
+        _time_classes = [c.name for c in values["time_split"]]
+        _toll_classes = [c.name for c in values["toll_choice"]]
+
+        assert (
+            _class_names == _gen_classes
+        ), "truck.classes ({_class_names}) doesn't equal\
+            class names in truck.trip_gen ({_gen_classes})."
+        assert (
+            _class_names == _dist_classes
+        ), "truck.classes ({_class_names}) doesn't equal\
+            class names in truck.trip_dist ({_dist_classes})."
+        assert (
+            _class_names == _time_classes
+        ), "truck.classes ({_class_names}) doesn't equal\
+            class names in truck.time_split ({_time_classes})."
+        assert (
+            _class_names == _toll_classes
+        ), "truck.classes ({_class_names}) doesn't equal\
+            class names in truck.toll_choice ({_toll_classes})."
+
+        return v
+    """
 
 
 @dataclass(frozen=True)
 class ActiveModeShortestPathSkimConfig(ConfigItem):
-    """Active mode skim entry"""
+    """Active mode skim entry."""
 
     mode: str
     roots: str
@@ -179,7 +660,7 @@ class ActiveModeShortestPathSkimConfig(ConfigItem):
 
 @dataclass(frozen=True)
 class ActiveModesConfig(ConfigItem):
-    """Active Mode skim parameters"""
+    """Active Mode skim parameters."""
 
     emme_scenario_id: int
     shortest_path_skims: Tuple[ActiveModeShortestPathSkimConfig, ...]
@@ -187,13 +668,13 @@
 
 @dataclass(frozen=True)
 class HighwayCapClassConfig(ConfigItem):
-    """Highway link capacity and speed ('capclass') index entry
+    """Highway link capacity and speed ('capclass') index entry.
 
     Properties:
         capclass: cross index for link @capclass lookup
         capacity: value for link capacity, PCE / hour
         free_flow_speed: value for link free flow speed, miles / hour
-        critical_speed: value for cirtical speed (Ja) used in Akcelik
+        critical_speed: value for critical speed (Ja) used in Akcelik
            type functions
     """
@@ -204,15 +685,16 @@
 
 @dataclass(frozen=True)
-class HighwayClassDemandConfig(ConfigItem):
-    """Highway class input source for demand.
+class ClassDemandConfig(ConfigItem):
+    """Input source for demand for highway or transit assignment class.
Used to specify where to find related demand file for this
-    highway class. Multiple
+    highway or transit class.
 
     Properties:
        source: reference name of the component section for the
-            source "highway_demand_file" location, one of:
+            source "highway_demand_file" (for a highway class)
+            or "transit_demand_file" (for a transit class), one of:
            "household", "air_passenger", "internal_external", "truck"
        name: name of matrix in the OMX file, can include "{period}"
            placeholder
@@ -260,13 +742,13 @@
        description: longer text used in attribute and matrix descriptions
        mode_code: single character mode, used to generate link.modes
            to identify subnetwork, generated from "excluded_links" keywords.
-            Should be unique in list of classes, unless multiple classes
+            Should be unique in list of classes, unless multiple classes
            have identical excluded_links specification. Cannot be the same
            as used for highway.maz_to_maz.mode_code.
        value_of_time: value of time for this class in $ / hr
        operating_cost_per_mile: vehicle operating cost in cents / mile
        demand: list of OMX file and matrix keyname references,
-            see HighwayClassDemandConfig
+            see ClassDemandConfig
        excluded_links: list of keywords to identify links to exclude from
            this class' available subnetwork (generate link.modes)
            Options are:
@@ -286,14 +768,15 @@
            "dist": distance in miles
            "hovdist": distance on HOV facilities (is_sr2 or is_sr3)
            "tolldist": distance on toll facilities
-                (@tollbooth > highway.tolls.tollbooth_start_index)
+                (@tollbooth > highway.tolls.valuetoll_start_tollbooth_code)
            "freeflowtime": free flow travel time in minutes
            "bridgetoll_{vehicle}": bridge tolls, {vehicle} refers to toll group
            "valuetoll_{vehicle}": other, non-bridge tolls, {vehicle} refers to toll group
    """
 
     name: str = Field(min_length=1, max_length=10)
-    description: str = Field(default="")
+    veh_group_name: str = Field(min_length=1, max_length=10)
+    description: Optional[str] = Field(default="")
     mode_code: str = Field(min_length=1, max_length=1)
     value_of_time: float = Field(gt=0)
     operating_cost_per_mile: float = Field(ge=0)
@@ -304,7 +787,7 @@
     skims: Tuple[str, ...] = Field()
     toll: Tuple[str, ...] = Field()
     toll_factor: Optional[float] = Field(default=None, gt=0)
-    demand: Tuple[HighwayClassDemandConfig, ...] = Field()
+    demand: Tuple[ClassDemandConfig, ...] = Field()
 
 
 @dataclass(frozen=True)
@@ -313,7 +796,7 @@
 class HighwayTollsConfig(ConfigItem):
 
     Properties:
        file_path: source relative file path for the highway tolls index CSV
-        tollbooth_start_index: tollbooth separates links with "bridge" tolls
+        valuetoll_start_tollbooth_code: tollbooth code that separates links with "bridge" tolls
            (index < this value) vs. "value" tolls. These toll attributes
            can then be referenced separately in the highway.classes[].tolls
            list
@@ -322,25 +805,28 @@
        dst_vehicle_group_names: list of names used in destination network for
            the corresponding vehicle group. Length of list must be the same
            as src_vehicle_group_names. Used for toll related attributes and
-            resulting skim matrices. Cross-referenced in list of highway.classes[]:
+            resulting skim matrices.
Cross-referenced in list of highway.classes[], + valid keywords for: excluded_links: "is_toll_{vehicle}" tolls: "@bridgetoll_{vehicle}", "@valuetoll_{vehicle}" skims: "bridgetoll_{vehicle}", "valuetoll_{vehicle}" """ - file_path: str = Field() - tollbooth_start_index: int = Field(gt=1) + file_path: pathlib.Path = Field() + valuetoll_start_tollbooth_code: int = Field(gt=1) src_vehicle_group_names: Tuple[str, ...] = Field() dst_vehicle_group_names: Tuple[str, ...] = Field() - @classmethod @validator("dst_vehicle_group_names", always=True) def dst_vehicle_group_names_length(cls, value, values): - """Validate dst_vehicle_group_names has same length as src_vehicle_group_names""" + """Validate dst_vehicle_group_names has same length as src_vehicle_group_names.""" if "src_vehicle_group_names" in values: assert len(value) == len( values["src_vehicle_group_names"] - ), "must be same length as src_vehicle_group_names" + ), "dst_vehicle_group_names must be same length as src_vehicle_group_names" + assert all( + [len(v) <= 4 for v in value] + ), "dst_vehicle_group_names must be 4 characters or less" return value @@ -359,7 +845,7 @@ def dst_vehicle_group_names_length(cls, value, values): @dataclass(frozen=True) class DemandCountyGroupConfig(ConfigItem): - """Grouping of counties for assignment and demand files + """Grouping of counties for assignment and demand files. Properties: number: id number for this group, must be unique @@ -372,7 +858,7 @@ class DemandCountyGroupConfig(ConfigItem): @dataclass(frozen=True) class HighwayMazToMazConfig(ConfigItem): - """Highway MAZ to MAZ shortest path assignment and skim parameters + """Highway MAZ to MAZ shortest path assignment and skim parameters. Properties: mode_code: single character mode, used to generate link.modes to @@ -400,17 +886,17 @@ class HighwayMazToMazConfig(ConfigItem): mode_code: str = Field(min_length=1, max_length=1) value_of_time: float = Field(gt=0) operating_cost_per_mile: float = Field(ge=0) + max_distance: float = Field(gt=0) max_skim_cost: float = Field(gt=0) excluded_links: Tuple[str, ...] = Field() - demand_file: str = Field() + demand_file: pathlib.Path = Field() demand_county_groups: Tuple[DemandCountyGroupConfig, ...] = Field() skim_period: str = Field() - output_skim_file: str = Field() + output_skim_file: pathlib.Path = Field() - @classmethod @validator("demand_county_groups") def unique_group_numbers(cls, value): - """Validate list of demand_county_groups has unique .number values""" + """Validate list of demand_county_groups has unique .number values.""" group_ids = [group.number for group in value] assert len(group_ids) == len(set(group_ids)), "-> number value must be unique" return value @@ -418,7 +904,7 @@ def unique_group_numbers(cls, value): @dataclass(frozen=True) class HighwayConfig(ConfigItem): - """Highway assignment and skims parameters + """Highway assignment and skims parameters. 
+    """Highway assignment and skims parameters.
 
     Properties:
         generic_highway_mode_code: single character unique mode ID for entire
@@ -428,7 +914,12 @@
         area_type_buffer_dist_miles: used in calculation to categorize link @areatype
             The area type is determined based on the average density of nearby
             (within this buffer distance) MAZs, using (pop+jobs*2.5)/acres
-        output_skim_path: relative path template for output skims in OMX format
+        drive_access_output_skim_path: relative path for drive access to transit skims
+        output_skim_path: relative path template from run dir for OMX output skims
+        output_skim_filename_tmpl: template for OMX filename for a time period. Must include
+            {time_period} in the string and end in '.omx'.
+        output_skim_matrixname_tmpl: template for matrix names within OMX output skims.
+            Should include {time_period}, {mode}, and {property}
         tolls: input toll specification, see HighwayTollsConfig
         maz_to_maz: maz-to-maz shortest path assignment and skim specification,
             see HighwayMazToMazConfig
@@ -442,34 +933,60 @@
     relative_gap: float = Field(ge=0)
     max_iterations: int = Field(ge=0)
     area_type_buffer_dist_miles: float = Field(gt=0)
-    output_skim_path: str = Field()
+    drive_access_output_skim_path: Optional[str] = Field(default=None)
+    output_skim_path: pathlib.Path = Field()
+    output_skim_filename_tmpl: str = Field()
+    output_skim_matrixname_tmpl: str = Field()
     tolls: HighwayTollsConfig = Field()
     maz_to_maz: HighwayMazToMazConfig = Field()
     classes: Tuple[HighwayClassConfig, ...] = Field()
     capclass_lookup: Tuple[HighwayCapClassConfig, ...] = Field()
+    interchange_nodes_file: str = Field()
+
+    @validator("output_skim_filename_tmpl")
+    def valid_skim_template(value):
+        """Validate skim template has correct {} and extension."""
+        assert (
+            "{time_period" in value
+        ), f"-> output_skim_filename_tmpl must have {{time_period}}, found {value}."
+        assert (
+            value[-4:].lower() == ".omx"
+        ), f"-> output_skim_filename_tmpl must end in '.omx', found {value[-4:].lower()}"
+        return value
+
+    @validator("output_skim_matrixname_tmpl")
+    def valid_skim_matrix_name_template(value):
+        """Validate skim matrix template has correct {}."""
+        assert (
+            "{time_period" in value
+        ), f"-> output_skim_matrixname_tmpl must have {{time_period}}, found {value}."
+        assert (
+            "{property" in value
+        ), f"-> output_skim_matrixname_tmpl must have {{property}}, found {value}."
+        assert (
+            "{mode" in value
+        ), f"-> output_skim_matrixname_tmpl must have {{mode}}, found {value}."
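+        # Editor's note (illustrative; the template value is an assumption):
+        # a string satisfying all three checks above would be
+        #     "{property}_{mode}_{time_period}"
+        # which could yield matrix names such as "time_da_am".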
+ return value - @classmethod @validator("capclass_lookup") def unique_capclass_numbers(cls, value): - """Validate list of capclass_lookup has unique .capclass values""" + """Validate list of capclass_lookup has unique .capclass values.""" capclass_ids = [i.capclass for i in value] error_msg = "-> capclass value must be unique in list" assert len(capclass_ids) == len(set(capclass_ids)), error_msg return value - @classmethod @validator("classes", pre=True) def unique_class_names(cls, value): - """Validate list of classes has unique .name values""" + """Validate list of classes has unique .name values.""" class_names = [highway_class["name"] for highway_class in value] error_msg = "-> name value must be unique in list" assert len(class_names) == len(set(class_names)), error_msg return value - @classmethod @validator("classes") def validate_class_mode_excluded_links(cls, value, values): - """Validate list of classes has unique .mode_code or .excluded_links match""" + """Validate list of classes has unique .mode_code or .excluded_links match.""" # validate if any mode IDs are used twice, that they have the same excluded links sets mode_excluded_links = {values["generic_highway_mode_code"]: set([])} for i, highway_class in enumerate(value): @@ -490,13 +1007,20 @@ def validate_class_mode_excluded_links(cls, value, values): mode_excluded_links[highway_class.mode_code] = highway_class.excluded_links return value - @classmethod @validator("classes") def validate_class_keyword_lists(cls, value, values): - """Validate classes .skims, .toll, and .excluded_links values""" + """Validate classes .skims, .toll, and .excluded_links values.""" if "tolls" not in values: return value - avail_skims = ["time", "dist", "hovdist", "tolldist", "freeflowtime"] + avail_skims = [ + "time", + "dist", + "hovdist", + "tolldist", + "freeflowtime", + "rlbty", + "autotime", + ] available_link_sets = ["is_sr", "is_sr2", "is_sr3", "is_auto_only"] avail_toll_attrs = [] for name in values["tolls"].dst_vehicle_group_names: @@ -528,70 +1052,193 @@ def check_keywords(class_num, key, val, available): @dataclass(frozen=True) class TransitModeConfig(ConfigItem): - """Transit mode definition (see also mode in the Emme API)""" + """Transit mode definition (see also mode in the Emme API).""" - type: Literal["WALK", "ACCESS", "EGRESS", "LOCAL", "PREMIUM"] + type: Literal["WALK", "ACCESS", "EGRESS", "LOCAL", "PREMIUM", "DRIVE", "PNR_dummy","KNR_dummy"] assign_type: Literal["TRANSIT", "AUX_TRANSIT"] mode_id: str = Field(min_length=1, max_length=1) name: str = Field(max_length=10) + description: Optional[str] = "" in_vehicle_perception_factor: Optional[float] = Field(default=None, ge=0) - speed_miles_per_hour: Optional[float] = Field(default=None, gt=0) + speed_or_time_factor: Optional[str] = Field(default="") + initial_boarding_penalty: Optional[float] = Field(default=None, ge=0) + transfer_boarding_penalty: Optional[float] = Field(default=None, ge=0) + headway_fraction: Optional[float] = Field(default=None, ge=0) + transfer_wait_perception_factor: Optional[float] = Field(default=None, ge=0) + eawt_factor: Optional[float] = Field(default=1) - @classmethod @validator("in_vehicle_perception_factor", always=True) def in_vehicle_perception_factor_valid(cls, value, values): - """Validate in_vehicle_perception_factor exists if assign_type is TRANSIT""" + """Validate in_vehicle_perception_factor exists if assign_type is TRANSIT.""" if "assign_type" in values and values["assign_type"] == "TRANSIT": assert value is not None, "must be specified 
when assign_type==TRANSIT" return value - @classmethod - @validator("speed_miles_per_hour", always=True) - def speed_miles_per_hour_valid(cls, value, values): - """Validate speed_miles_per_hour exists if assign_type is AUX_TRANSIT""" + @validator("speed_or_time_factor", always=True) + def speed_or_time_factor_valid(cls, value, values): + """Validate speed_or_time_factor exists if assign_type is AUX_TRANSIT.""" if "assign_type" in values and values["assign_type"] == "AUX_TRANSIT": assert value is not None, "must be specified when assign_type==AUX_TRANSIT" return value + @validator("initial_boarding_penalty", always=True) + def initial_boarding_penalty_valid(value, values): + """Validate initial_boarding_penalty exists if assign_type is TRANSIT.""" + if "assign_type" in values and values["assign_type"] == "TRANSIT": + assert value is not None, "must be specified when assign_type==TRANSIT" + return value + + @validator("transfer_boarding_penalty", always=True) + def transfer_boarding_penalty_valid(value, values): + """Validate transfer_boarding_penalty exists if assign_type is TRANSIT.""" + if "assign_type" in values and values["assign_type"] == "TRANSIT": + assert value is not None, "must be specified when assign_type==TRANSIT" + return value + + @validator("headway_fraction", always=True) + def headway_fraction_valid(value, values): + """Validate headway_fraction exists if assign_type is TRANSIT.""" + if "assign_type" in values and values["assign_type"] == "TRANSIT": + assert value is not None, "must be specified when assign_type==TRANSIT" + return value + + @validator("transfer_wait_perception_factor", always=True) + def transfer_wait_perception_factor_valid(value, values): + """Validate transfer_wait_perception_factor exists if assign_type is TRANSIT.""" + if "assign_type" in values and values["assign_type"] == "TRANSIT": + assert value is not None, "must be specified when assign_type==TRANSIT" + return value + + @classmethod + @validator("mode_id") + def mode_id_valid(cls, value): + """Validate mode_id.""" + assert len(value) == 1, "mode_id must be one character" + return value + @dataclass(frozen=True) class TransitVehicleConfig(ConfigItem): - """Transit vehicle definition (see also transit vehicle in the Emme API)""" + """Transit vehicle definition (see also transit vehicle in the Emme API).""" - vehicle_id: int - mode: str - name: str + vehicle_id: Optional[int] = Field(default=None, ge=0) + mode: Optional[str] = Field(default="") + name: Optional[str] = Field(default="") auto_equivalent: Optional[float] = Field(default=0, ge=0) seated_capacity: Optional[int] = Field(default=None, ge=0) total_capacity: Optional[int] = Field(default=None, ge=0) +@dataclass(frozen=True) +class TransitClassConfig(ConfigItem): + """Transit demand class definition.""" + + skim_set_id: str + name: str + description: str + mode_types: Tuple[str, ...] + demand: Tuple[ClassDemandConfig, ...] + required_mode_combo: Optional[Tuple[str, ...]] = Field(default=None) + + +@dataclass(frozen=True) +class AssignmentStoppingCriteriaConfig(ConfigItem): + "Assignment stop configuration parameters." + max_iterations: int + relative_difference: float + percent_segments_over_capacity: float + + +@dataclass(frozen=True) +class CcrWeightsConfig(ConfigItem): + "Weights for CCR Configuration." 
+    min_seat: float = Field(default=1.0)
+    max_seat: float = Field(default=1.4)
+    power_seat: float = Field(default=2.2)
+    min_stand: float = Field(default=1.4)
+    max_stand: float = Field(default=1.6)
+    power_stand: float = Field(default=3.4)
+
+
+@dataclass(frozen=True)
+class CongestedWeightsConfig(ConfigItem):
+    "Weights for Congested Transit Assignment Configuration."
+    min_seat: float = Field(default=1.0)
+    max_seat: float = Field(default=1.4)
+    power_seat: float = Field(default=2.2)
+    min_stand: float = Field(default=1.4)
+    max_stand: float = Field(default=1.6)
+    power_stand: float = Field(default=3.4)
+
+
+@dataclass(frozen=True)
+class EawtWeightsConfig(ConfigItem):
+    "Weights for calculating extra added wait time Configuration."
+    constant: float = Field(default=0.259625)
+    weight_inverse_headway: float = Field(default=1.612019)
+    vcr: float = Field(default=0.005274)
+    exit_proportion: float = Field(default=0.591765)
+    default_eawt_factor: float = Field(default=1)
+
+
+@dataclass(frozen=True)
+class CongestedAssnConfig(ConfigItem):
+    "Congested transit assignment Configuration."
+    trim_demand_before_congested_transit_assignment: bool = False
+    output_trimmed_demand_report_path: Optional[str] = Field(default=None)
+    normalized_gap: float = Field(default=0.25)
+    relative_gap: float = Field(default=0.25)
+    use_peaking_factor: bool = False
+    am_peaking_factor: float = Field(default=1.219)
+    pm_peaking_factor: float = Field(default=1.262)
+
+
 @dataclass(frozen=True)
 class TransitConfig(ConfigItem):
-    """Transit assignment parameters"""
+    """Transit assignment parameters."""
 
     modes: Tuple[TransitModeConfig, ...]
-    vehicles: Tuple[TransitVehicleConfig, ...]
-
+    classes: Tuple[TransitClassConfig, ...]
     apply_msa_demand: bool
     value_of_time: float
+    walk_speed: float
+    transit_speed: float
     effective_headway_source: str
     initial_wait_perception_factor: float
     transfer_wait_perception_factor: float
     walk_perception_factor: float
-    initial_boarding_penalty: float
-    transfer_boarding_penalty: float
+    walk_perception_factor_cbd: float
+    drive_perception_factor: float
     max_transfers: int
-    output_skim_path: str
-    fares_path: str
-    fare_matrix_path: str
-    fare_max_transfer_distance_miles: float
     use_fares: bool
+    fare_2015_to_2000_deflator: float
+    fares_path: pathlib.Path
+    fare_matrix_path: pathlib.Path
+    fare_max_transfer_distance_miles: float
     override_connector_times: bool
+    use_ccr: bool
+    ccr_stop_criteria: Optional[AssignmentStoppingCriteriaConfig]
+    ccr_weights: CcrWeightsConfig
+    eawt_weights: EawtWeightsConfig
+    congested_transit_assignment: bool
+    congested: CongestedAssnConfig
+    congested_weights: CongestedWeightsConfig
+    output_skim_path: pathlib.Path
+    output_skim_filename_tmpl: str = Field()
+    output_skim_matrixname_tmpl: str = Field()
+    output_stop_usage_path: Optional[str] = Field(default=None)
+    output_transit_boardings_path: Optional[str] = Field(default=None)
+    output_transit_segment_path: Optional[str] = Field(default=None)
+    output_station_to_station_flow_path: Optional[str] = Field(default=None)
+    output_transfer_at_station_path: Optional[str] = Field(default=None)
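+    # Editor's illustrative note (values are hypothetical): the two templates
+    # above are expected to look like
+    #   output_skim_filename_tmpl = "transit_skims_{time_period}.omx"
+    #   output_skim_matrixname_tmpl = "{time_period}_{mode}_{property}"
+    # producing e.g. an "am_loc_ivt" matrix inside "transit_skims_am.omx".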
+    timed_transfer_nodes: Tuple[int, ...] = Field()
+    output_transfer_at_station_node_ids: Dict[str, int] = Field()
+    max_ccr_iterations: Optional[float] = Field(default=None)
+    split_connectors_to_prevent_walk: bool = False
     input_connector_access_times_path: Optional[str] = Field(default=None)
     input_connector_egress_times_path: Optional[str] = Field(default=None)
-    output_stop_usage_path: Optional[str] = Field(default=None)
-
+    vehicles: Optional[TransitVehicleConfig] = Field(default_factory=TransitVehicleConfig)
 
 
 @dataclass(frozen=True)
 class EmmeConfig(ConfigItem):
@@ -600,9 +1247,10 @@
 
     Properties:
         all_day_scenario_id: scenario ID to use for all day
            (initial imported) scenario with all time period data
-        project_path: relative path to Emme desktop project (.emp)
+        project_path: relative path from run_dir to Emme desktop project (.emp)
         highway_database_path: relative path to highway Emmebank
-        active_database_paths: list of relative paths to active mode Emmebanks
+        active_north_database_path: relative path to the active mode Emmebank for the north bay
+        active_south_database_path: relative path to the active mode Emmebank for the south bay
         transit_database_path: relative path to transit Emmebank
         num_processors: the number of processors to use in Emme procedures,
            either as an integer, or value MAX, MAX-N. Typically recommend
    """
 
     all_day_scenario_id: int
-    project_path: str
-    highway_database_path: str
-    active_database_paths: Tuple[str, ...]
-    transit_database_path: str
-    num_processors: str = Field(regex=r"(?i)^MAX$|^MAX[\s]*-[\s]*[\d]+$|^[\d]+$")
+    project_path: pathlib.Path
+    highway_database_path: pathlib.Path
+    active_north_database_path: pathlib.Path
+    active_south_database_path: pathlib.Path
+    transit_database_path: pathlib.Path
+    num_processors: str = Field(regex=r"^MAX$|^MAX-\d+$|^\d+$")
 
 
 @dataclass(frozen=True)
 class Configuration(ConfigItem):
-    """Configuration: root of the model configuration"""
+    """Configuration: root of the model configuration."""
 
     scenario: ScenarioConfig
     run: RunConfig
@@ -633,31 +1282,37 @@
     highway: HighwayConfig
     transit: TransitConfig
     emme: EmmeConfig
+    logging: Optional[LoggingConfig] = Field(default_factory=LoggingConfig)
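+    # Illustrative usage (editor's sketch; the file names are the conventional
+    # ones named in the docstring below):
+    #   config = Configuration.load_toml(["scenario_config.toml", "model_config.toml"])
+    # Later files override keys from earlier ones via _merge_dicts.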
 
     @classmethod
-    def load_toml(cls, path: Union[str, List[str]]):
-        """Load configuration from .toml files(s)
+    def load_toml(
+        cls,
+        toml_path: Union[List[Union[str, pathlib.Path]], str, pathlib.Path],
+    ) -> "Configuration":
+        """Load configuration from .toml file(s).
 
         Normally the config is split into a scenario_config.toml file
         and a model_config.toml file.
 
         Args:
-            path: a valid system path to a TOML format config file or list of paths
+            toml_path: a valid system path string or Path object to a TOML format
+                config file, or a list of such paths to a set of TOML files.
 
         Returns:
             A Configuration object
         """
-        if isinstance(path, str):
-            path = [path]
-        data = _load_toml(path[0])
-        for path_item in path[1:]:
+        if not isinstance(toml_path, list):
+            toml_path = [toml_path]
+        toml_path = list(map(pathlib.Path, toml_path))
+
+        data = _load_toml(toml_path[0])
+        for path_item in toml_path[1:]:
             _merge_dicts(data, _load_toml(path_item))
         return cls(**data)
 
-    @classmethod
     @validator("highway")
     def maz_skim_period_exists(cls, value, values):
-        """Validate highway.maz_to_maz.skim_period refers to a valid period"""
+        """Validate highway.maz_to_maz.skim_period refers to a valid period."""
         if "time_periods" in values:
             time_period_names = set(time.name for time in values["time_periods"])
             assert (
@@ -667,7 +1322,7 @@
 
 def _load_toml(path: str) -> dict:
-    """Load config from toml file at path"""
+    """Load config from toml file at path."""
     with open(path, "r", encoding="utf-8") as toml_file:
         data = toml.load(toml_file)
     return data
diff --git a/tm2py/controller.py b/tm2py/controller.py
index defd6ba3..1c29e7ac 100644
--- a/tm2py/controller.py
+++ b/tm2py/controller.py
@@ -6,33 +6,60 @@
 Typical usage example:
     from tm2py.controller import RunController
     controller = RunController(
-        [r"example_union\\scenario.toml", r"example_union\\model.toml"])
+        ["scenario.toml", "model.toml"])
     controller.run()
 
     Or from the command-line:
-    python \\tm2py\\tm2py\\controller.py –s scenario.toml –m model.toml
+    `python /tm2py/tm2py/controller.py -s scenario.toml -m model.toml`
 """
-
 import itertools
+import multiprocessing
 import os
-from typing import Union, List
+import queue
+import re
+from collections import deque
+from io import RawIOBase
+from multiprocessing.sharedctypes import Value
+from pathlib import Path
+from typing import Any, Collection, Dict, List, Tuple, Union
 
-from tm2py.config import Configuration
-from tm2py.emme.manager import EmmeManager
-from tm2py.logger import Logger
 from tm2py.components.component import Component
+from tm2py.components.demand.air_passenger import AirPassenger
+from tm2py.components.demand.commercial import CommercialVehicleModel
+from tm2py.components.demand.household import HouseholdModel
+from tm2py.components.demand.internal_external import InternalExternal
+from tm2py.components.network.active.active_modes import ActiveModesSkim
+from tm2py.components.network.create_tod_scenarios import CreateTODScenarios
+from tm2py.components.network.highway.drive_access_skims import DriveAccessSkims
 from tm2py.components.network.highway.highway_assign import HighwayAssignment
-from tm2py.components.network.highway.highway_network import PrepareNetwork
 from tm2py.components.network.highway.highway_maz import AssignMAZSPDemand, SkimMAZCosts
+from tm2py.components.network.highway.highway_network import PrepareNetwork
+from tm2py.components.network.transit.transit_assign import TransitAssignment
+from tm2py.components.network.transit.transit_network import PrepareTransitNetwork
+from tm2py.components.network.transit.transit_skim import TransitSkim
+from tm2py.config import Configuration
+from tm2py.emme.manager import EmmeManager
+from tm2py.logger import Logger
+from tm2py.tools import emme_context
 
 # mapping from names referenced in config.run to imported classes
 # NOTE: component names also listed as literal in tm2py.config for validation
 component_cls_map = {
+    "active_modes": ActiveModesSkim,
+    "create_tod_scenarios": CreateTODScenarios,
     "prepare_network_highway": PrepareNetwork,
     "highway": HighwayAssignment,
"highway_maz_assign": AssignMAZSPDemand, "highway_maz_skim": SkimMAZCosts, + "drive_access_skims": DriveAccessSkims, + "prepare_network_transit": PrepareTransitNetwork, + "transit_assign": TransitAssignment, + "transit_skim": TransitSkim, + "air_passenger": AirPassenger, + "internal_external": InternalExternal, + "truck": CommercialVehicleModel, + "household": HouseholdModel, } # pylint: disable=too-many-instance-attributes @@ -56,111 +83,231 @@ class RunController: transit assignments and skims) utilities. complete_components: list of components which have completed, tuple of (iteration, name, Component object) + + Internal properties: + _emme_manager: EmmeManager object, cached on first access + _iteration: current iteration + _component: current running / last run Component + _component_name: name of the current / last run component + _queued_components: list of iteration, name, Component """ - def __init__(self, config_file: Union[List[str], str] = None, run_dir: str = None): - if not isinstance(config_file, list): - config_file = [config_file] + def __init__( + self, + config_file: Union[Collection[Union[str, Path]], str, Path] = None, + run_dir: Union[Path, str] = None, + run_components: Collection[str] = component_cls_map.keys(), + ): + """Constructor for RunController class. + + Args: + config_file: Single or list of config file locations as strings or Path objects. + Defaults to None. + run_dir: Model run directory as a Path object or string. If not provided, defaults + to the directory of the first config_file. + run_components: List of component names to run. Defaults to all components. + """ if run_dir is None: - run_dir = os.path.abspath(os.path.dirname(config_file[0])) - self._run_dir = run_dir + run_dir = Path(os.path.abspath(os.path.dirname(config_file[0]))) + + self._run_dir = Path(run_dir) self.config = Configuration.load_toml(config_file) + self.has_emme: bool = emme_context() + # NOTE: Logger opens log file on __enter__ (in run), not ready for logging yet + # Logger uses self.config.logging self.logger = Logger(self) self.top_sheet = None self.trace = None self.completed_components = [] - # mapping from defined names referenced in config to Component objects - self._component_map = {k: v(self) for k, v in component_cls_map.items()} + self._validated_components = set() self._emme_manager = None self._iteration = None self._component = None - self._queued_components = [] - self._queue_components() + self._component_name = None + self._queued_components = deque() + + # mapping from defined names referenced in config to Component objects + self._component_map = { + k: v(self) for k, v in component_cls_map.items() if k in run_components + } + + self._queue_components(run_components=run_components) + + def __repr__(self): + """Legible representation.""" + _str = f"""RunController + Run Directory: {self.run_dir} + Iteration: {self.iteration} of {self.run_iterations} + Component: {self.component_name} + Completed: {self.completed_components} + Queued: {self._queued_components}""" + return _str @property - def run_dir(self) -> str: - """The root run directory of the model run""" + def run_dir(self) -> Path: + """The root run directory of the model run.""" return self._run_dir + @property + def run_iterations(self) -> List[int]: + """List of iterations for this model run.""" + return range( + max(1, self.config.run.start_iteration), self.config.run.end_iteration + 1 + ) + + @property + def time_period_names(self) -> List[str]: + """Return input time_period name or names and 
return list of time_period names.
+
+        Implemented here for easy access for all components.
+
+        Returns: list of uppercased string names of time periods
+        """
+        return [time.name.upper() for time in self.config.time_periods]
+
+    @property
+    def time_period_durations(self) -> dict:
+        """Return mapping of time periods to durations in hours."""
+        return dict((p.name, p.length_hours) for p in self.config.time_periods)
+
+    @property
+    def congested_transit_assn_max_iteration(self) -> dict:
+        """Return mapping of time periods to max iteration in congested transit assignment."""
+        return dict(
+            (p.name, p.congested_transit_assn_max_iteration)
+            for p in self.config.time_periods
+        )
+
+    @property
+    def num_processors(self) -> int:
+        """Number of processors to use, as resolved by the EmmeManager."""
+        return self.emme_manager.num_processors
+
     @property
     def iteration(self) -> int:
-        """Current iteration of model"""
+        """Current iteration of model run."""
         return self._iteration
 
     @property
+    def component_name(self) -> str:
+        """Name of current component of model run."""
+        return self._component_name
+
+    @property
+    def iter_component(self) -> Tuple[int, str]:
+        """Tuple of the current iteration and component name."""
+        return self._iteration, self._component_name
+
+    @property
     def component(self) -> Component:
-        """Current component of model"""
+        """Current component of model."""
         return self._component
 
     @property
     def emme_manager(self) -> EmmeManager:
-        """Cached Emme Manager object"""
+        """Cached Emme Manager object."""
         if self._emme_manager is None:
-            self._init_emme_manager()
+            if self.has_emme:
+                self._emme_manager = EmmeManager(self, self.config.emme)
+            else:
+                self.logger.log("Emme not found, skipping Emme-related components")
+                # TODO: All of the Emme-related components need to be handled
+                # "in place" rather than skipping using a Mock
+                from unittest.mock import MagicMock
+
+                self._emme_manager = MagicMock()
         return self._emme_manager
 
-    def _init_emme_manager(self):
-        """Initialize Emme manager, start Emme desktop App, and initialize Modeller"""
-        self._emme_manager = EmmeManager()
-        project = self._emme_manager.project(
-            os.path.join(self.run_dir, self.config.emme.project_path)
-        )
-        # Initialize Modeller to use Emme assignment tools and other APIs
-        self._emme_manager.modeller(project)
+    def get_abs_path(self, rel_path: Union[Path, str]) -> Path:
+        """Get the absolute path from the root run directory given a relative path."""
+        if not isinstance(rel_path, Path):
+            rel_path = Path(rel_path)
+        return Path(os.path.join(self.run_dir, rel_path))
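+    # Illustrative behavior (editor's sketch; the file name is hypothetical):
+    #   controller.get_abs_path("inputs/maz_data.csv")
+    #   -> Path("<run_dir>/inputs/maz_data.csv")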
+ """ self._iteration = None - self.validate_inputs() - for iteration, name, component in self._queued_components: - if self._iteration != iteration: - self.logger.log_time(f"Start iteration {iteration}") - self._iteration = iteration - self._component = component - component.run() - self.completed_components.append((iteration, name, component)) - - def _queue_components(self): - """Add components per iteration to queue according to input Config""" - self._queued_components = [] - if self.config.run.start_iteration == 0: - self._queued_components += [ - (0, c_name, self._component_map[c_name]) - for c_name in self.config.run.initial_components + while self._queued_components: + self.run_next() + + def run_next(self): + """Run next component in the queue.""" + if not self._queued_components: + raise ValueError("No components in queue") + iteration, name, component = self._queued_components.popleft() + if self._iteration != iteration: + self.logger.log(f"Start iteration {iteration}") + self._iteration = iteration + self._component = component + component.run() + self.completed_components.append((iteration, name, component)) + + def _queue_components(self, run_components: Collection[str] = None): + """Add components per iteration to queue according to input Config. + + Args: + run_components: if provided, only run these components + """ + try: + assert not self._queued_components + except AssertionError: + "Components already queued, returning without re-queuing." + return + + print("RUN COMPOMENTS", run_components) + _initial_components = self.config.run.initial_components + _global_iter_components = self.config.run.global_iteration_components + _final_components = self.config.run.final_components + + if run_components is not None: + _initial_components = [ + c for c in _initial_components if c in run_components ] - iteration_nums = range( - max(1, self.config.run.start_iteration), self.config.run.end_iteration + 1 - ) - iteration_components = [ - self._component_map[c_name] - for c_name in self.config.run.global_iteration_components - ] - self._queued_components += list( - itertools.product( - iteration_nums, - iteration_components, - self.config.run.global_iteration_components, - ) + _global_iter_components = [ + c for c in _global_iter_components if c in run_components + ] + _final_components = [c for c in _final_components if c in run_components] + + if self.config.run.start_iteration == 0: + for _c_name in _initial_components: + self._add_component_to_queue(0, _c_name) + + # Queue components which are run for each iteration + + _iteration_x_components = itertools.product( + self.run_iterations, _global_iter_components ) - self._queued_components += [ - (self.config.run.end_iteration + 1, self._component_map[c_name]) - for c_name in self.config.run.final_components - ] + for _iteration, _c_name in _iteration_x_components: + self._add_component_to_queue(_iteration, _c_name) + + # Queue components which are run after final iteration + _finalizer_iteration = self.config.run.end_iteration + 1 + + for c_name in _final_components: + self._add_component_to_queue(_finalizer_iteration, _c_name) + + # If start_component specified, remove things before its first occurance if self.config.run.start_component: - start_index = [ - idx - for idx, c in enumerate(self._queued_components) - if self.config.run.start_component == c[1] - ][0] - self._queued_components = self._queued_components[start_index:] - - def validate_inputs(self): - """Validate input state prior to run""" - already_validated_components 
+
+        # If start_component specified, remove things before its first occurrence
         if self.config.run.start_component:
-            start_index = [
-                idx
-                for idx, c in enumerate(self._queued_components)
-                if self.config.run.start_component == c[1]
-            ][0]
-            self._queued_components = self._queued_components[start_index:]
-
-    def validate_inputs(self):
-        """Validate input state prior to run"""
-        already_validated_components = set()
-        for _, name, component in self._queued_components:
-            if name not in already_validated_components:
-                component.validate_inputs()
-                already_validated_components.add(name)
+            _queued_c_names = [c[1] for c in self._queued_components]
+            if self.config.run.start_component not in _queued_c_names:
+                raise ValueError(
+                    f"Start component {self.config.run.start_component} not found in"
+                    f" queued components {_queued_c_names}"
+                )
+            _start_c_index = _queued_c_names.index(self.config.run.start_component)
+            self._queued_components = deque(
+                itertools.islice(self._queued_components, _start_c_index, None)
+            )
+
+    def _add_component_to_queue(self, iteration: int, component_name: str):
+        """Add component to queue (self._queued_components), first validating its inputs.
+
+        Args:
+            iteration (int): iteration to add the component to.
+            component_name (str): name of the component to add to the queue.
+        """
+        _component = self._component_map[component_name]
+        if component_name not in self._validated_components:
+            _component.validate_inputs()
+            self._validated_components.add(component_name)
+        self._queued_components.append((iteration, component_name, _component))
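+    # Illustrative end-to-end usage (editor's sketch):
+    #   controller = RunController(
+    #       ["scenario.toml", "model.toml"], run_components=["highway"]
+    #   )
+    #   controller.run()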
""" -from contextlib import contextmanager as _context +import multiprocessing import os +import re +from contextlib import contextmanager as _context +from pathlib import Path from socket import error as _socket_error -from typing import Any, Dict, List, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union + +from typing_extensions import Literal + +from ..tools import emme_context + +emme_context() + +import inro.emme.desktop.app as _app + +if TYPE_CHECKING: + from tm2py.config import EmmeConfig + from tm2py.emme.manager import EmmeNetwork, EmmeScenario # PyLint cannot build AST from compiled Emme libraries # so disabling relevant import module checks # pylint: disable=E0611, E0401, E1101 +# Importing several Emme object types which are unused here, but so that +# the Emme API import are centralized within tm2py from inro.emme.database.emmebank import Emmebank -from inro.emme.network import Network as EmmeNetwork -from inro.emme.database.scenario import Scenario as EmmeScenario from inro.emme.database.matrix import Matrix as EmmeMatrix # pylint: disable=W0611 +from inro.emme.database.scenario import Scenario as EmmeScenario +from inro.emme.network import Network as EmmeNetwork +from inro.emme.network.link import Link as EmmeLink # pylint: disable=W0611 +from inro.emme.network.mode import Mode as EmmeMode # pylint: disable=W0611 from inro.emme.network.node import Node as EmmeNode # pylint: disable=W0611 -import inro.emme.desktop.app as _app -from inro.modeller import Modeller as EmmeModeller, logbook_write, logbook_trace +from inro.modeller import Modeller as EmmeModeller +from inro.modeller import logbook_trace, logbook_write EmmeDesktopApp = _app.App # "Emme Manager requires Emme to be installed unless running in a test environment." # "Please install Emme and try again." - # Cache running Emme projects from this process (simple singleton implementation) _EMME_PROJECT_REF = {} +class EmmeBank: + """Emmebamk wrapper class.""" + + def __init__(self, emme_manager, path: Union[str, Path]): + self.emme_manager = emme_manager + self.controller = self.emme_manager.controller + self._path = Path(path) + self._emmebank = None + self._zero_matrix = None + self.scenario_dict = { + tp.name: tp.emme_scenario_id for tp in self.controller.config.time_periods + } + + @property + def emmebank(self) -> Emmebank: + if self._emmebank is None: + self._emmebank = Emmebank(self.path) + return self._emmebank + + @property + def path(self) -> Path: + """Return the path to the Emmebank.""" + if not self._path.exists(): + self._path = self.get_abs_path(self._path) + if not self._path.exists(): + raise (FileNotFoundError(f"Emmebank not found: {self._path}")) + if not self._path.__str__().endswith("emmebank"): + self._path = os.path.join(self._path, "emmebank") + return self._path + + def change_dimensions(self, dimensions: Dict[str, int]): + """Change the Emmebank dimensions as specified. See the Emme API help for details. + + Args: + emmebank: the Emmebank object to change the dimensions + dimensions: dictionary of the specified dimensions to set. + """ + dims = self.emmebank.dimensions + new_dims = dims.copy() + new_dims.update(dimensions) + if dims != new_dims: + change_dimensions = self.emme_manager.tool( + "inro.emme.data.database.change_database_dimensions" + ) + change_dimensions(new_dims, self.emmebank, keep_backup=False) + + def scenario(self, time_period: str): + """Return the EmmeScenario for the given time period. 
+ + Args: + time_period: valid time period abbreviation + """ + + _scenario_id = self.scenario_dict[time_period.lower()] + return self.emmebank.scenario(_scenario_id) + + def get_or_init(self, name: str, matrix_type: Literal["SCALAR", "FULL"] = "FULL"): + _matrix = self.emmebank.matrix(f'ms"{name}"') + if _matrix is None: + ident = self.emmebank.available_matrix_identifier(matrix_type) + _matrix = self.emmebank.create_matrix(ident) + _matrix.name = name + _matrix.description = name + # _matrix.data = 0 + return _matrix + + @property + def zero_matrix(self): + """Create ms"zero" matrix for zero-demand assignments.""" + if self._zero_matrix is None: + self._zero_matrix = self.get_or_init("zero", "SCALAR") + return self._zero_matrix + + class EmmeManager: - """Centralized cache for Emme project and related calls for traffic and transit assignments. + """Centralized cache for a single Emme project and related calls. + + Leverages EmmeConfig. Wraps Emme Desktop API (see Emme API Reference for additional details on the Emme objects). """ - def __init__(self): - # mapping of Emme project path to Emme Desktop API object for reference - # (projects are opened only once) - self._project_cache = _EMME_PROJECT_REF + def __init__(self, controller, emme_config: "EmmeConfig"): + """The EmmeManager constructor. - def close_all(self): + Maps an Emme project path to Emme Desktop API object for reference + (projects are opened only once). """ - Close all open cached Emme project(s). + self.controller = controller + self.config = emme_config - Should be called at the end of the model process / Emme assignments. - """ - while self._project_cache: - _, app = self._project_cache.popitem() - app.close() + self.project_path = self.controller.get_abs_path(self.config.project_path) - def create_project(self, project_dir: str, name: str) -> EmmeDesktopApp: - """Create, open and return Emme project + # see if works without os.path.normcase(os.path.realpath(project_path)) + self.highway_database_path = self.controller.get_abs_path( + self.config.highway_database_path + ) + self.transit_database_path = self.controller.get_abs_path( + self.config.transit_database_path + ) + self.active_north_database_path = self.controller.get_abs_path( + self.config.active_north_database_path + ) + self.active_south_database_path = self.controller.get_abs_path( + self.config.active_south_database_path + ) - Args: - project_dir: path to Emme root directory for new Emme project - name: name for the Emme project + self._num_processors = None + self._project = None + self._modeller = None - Returns: - Emme Desktop App object, see Emme API Reference, Desktop section for details. + self._highway_emmebank = None + self._transit_emmebank = None + self._active_north_emmebank = None + self._active_south_emmebank = None + + # Initialize Modeller to use Emme assignment tools and other APIs + self._modeller = self.modeller + + def close(self): + """Close all open cached Emme project(s). + + Should be called at the end of the model process / Emme assignments. """ - emp_path = _app.create_project(project_dir, name) - return self.project(emp_path) + self._project.close() - def project(self, project_path: str) -> EmmeDesktopApp: + @property + def project(self) -> EmmeDesktopApp: """Return already open Emme project, or open new Desktop session if not found. Args: @@ -77,55 +188,20 @@ def project(self, project_path: str) -> EmmeDesktopApp: Returns: Emme Desktop App object, see Emme API Reference, Desktop section for details. 
""" - project_path = os.path.normcase(os.path.realpath(project_path)) - emme_project = self._project_cache.get(project_path) - if emme_project: + if self._project is not None: try: # Check if the Emme window was closed - emme_project.current_window() + self._project.current_window() except _socket_error: - emme_project = None + self._project = None # if window is not opened in this process, start a new one - if emme_project is None: - if not os.path.isfile(project_path): - raise Exception(f"Emme project path does not exist {project_path}") - emme_project = _app.start_dedicated( - visible=True, user_initials="inro", project=project_path + if self._project is None: + self._project = _app.start_dedicated( + visible=True, user_initials="inro", project=self.project_path ) - self._project_cache[project_path] = emme_project - return emme_project + return self._project - @staticmethod - def emmebank(path: str) -> Emmebank: - """Open and return the Emmebank at path. - - Args: - path: valid system path pointing to an Emmebank file - Returns: - Emmebank object, see Emme API Reference, Database section for details. - """ - if not path.endswith("emmebank"): - path = os.path.join(path, "emmebank") - return Emmebank(path) - - def change_emmebank_dimensions( - self, emmebank: Emmebank, dimensions: Dict[str, int] - ): - """Change the Emmebank dimensions as specified. See the Emme API help for details. - - Args: - emmebank: the Emmebank object to change the dimensions - dimensions: dictionary of the specified dimensions to set. - """ - dims = emmebank.dimensions - new_dims = dims.copy() - new_dims.update(dimensions) - if dims != new_dims: - change_dimensions = self.tool( - "inro.emme.data.database.change_database_dimensions" - ) - change_dimensions(new_dims, emmebank, keep_backup=False) - - def modeller(self, emme_project: EmmeDesktopApp = None) -> EmmeModeller: + @property + def modeller(self) -> EmmeModeller: """Initialize and return Modeller object. If Modeller has not already been initialized it will do so on @@ -140,18 +216,37 @@ def modeller(self, emme_project: EmmeDesktopApp = None) -> EmmeModeller: Emme Modeller object, see Emme API Reference, Modeller section for details. 
""" # pylint: disable=E0611, E0401, E1101 - try: - return EmmeModeller() - except AssertionError as error: - if emme_project is None: - if self._project_cache: - emme_project = next(iter(self._project_cache.values())) - else: - raise Exception( - "modeller not yet initialized and no cached Emme project," - " emme_project arg must be provided" - ) from error - return EmmeModeller(emme_project) + if self._modeller is None: + self._modeller = EmmeModeller(self.project) + return self._modeller + + @property + def highway_emmebank(self) -> EmmeBank: + if self._highway_emmebank is None: + self._highway_emmebank = EmmeBank(self, self.highway_database_path) + return self._highway_emmebank + + @property + def transit_emmebank(self) -> EmmeBank: + if self._transit_emmebank is None: + self._transit_emmebank = EmmeBank(self, self.transit_database_path) + return self._transit_emmebank + + @property + def active_north_emmebank(self) -> EmmeBank: + if self._active_north_emmebank is None: + self._active_north_emmebank = EmmeBank( + self, self.active_north_database_path + ) + return self._active_north_emmebank + + @property + def active_south_emmebank(self) -> EmmeBank: + if self._active_south_emmebank is None: + self._active_south_emmebank = EmmeBank( + self, self.active_south_database_path + ) + return self._active_south_emmebank def tool(self, namespace: str): """Return the Modeller tool at namespace. @@ -159,12 +254,91 @@ def tool(self, namespace: str): Returns: Corresponding Tool object, see Emme Help for full details. """ - return self.modeller().tool(namespace) + return self.modeller.tool(namespace) + + @property + def matrix_calculator(self): + "Shortcut to matrix calculator." + return self.controller.emme_manager.modeller.tool( + "inro.emme.matrix_calculation.matrix_calculator" + ) + + @property + def matrix_results(self): + "Shortcut to matrix results." + return self.controller.emme_manager.modeller.tool( + "inro.emme.transit_assignment.extended.matrix_results" + ) + + @property + def num_processors(self) -> int: + """Number of processors available for parallel processing.""" + if self._num_processors is None: + self._num_processors = self._calculate_num_processors() + + return self._num_processors + + @property + def num_processors(self): + """Convert input value (parse if string) to number of processors. + + + nt or string as 'MAX-X' + Returns: + An int of the number of processors to use + + Raises: + Exception: Input value exceeds number of available processors + Exception: Input value less than 1 processors + """ + _config_value = self.config.num_processors + _cpu_processors = multiprocessing.cpu_count() + num_processors = 0 + if isinstance(_config_value, str): + if _config_value.upper() == "MAX": + num_processors = _cpu_processors + elif re.match("^[0-9]+$", _config_value): + num_processors = int(_config_value) + else: + _processor_range = re.split(r"^MAX[/s]*-[/s]*", _config_value.upper()) + num_processors = max(_cpu_processors - int(_processor_range[1]), 1) + else: + num_processors = int(_config_value) + + num_processors = min(_cpu_processors, num_processors) + num_processors = max(1, num_processors) + + return num_processors + + @staticmethod + def copy_attribute_values( + src, + dst, + src_attributes: Dict[str, List[str]], + dst_attributes: Optional[Dict[str, List[str]]] = None, + ): + """Copy network/scenario attribute values from src to dst. 
 
+    @staticmethod
+    def copy_attribute_values(
+        src,
+        dst,
+        src_attributes: Dict[str, List[str]],
+        dst_attributes: Optional[Dict[str, List[str]]] = None,
+    ):
+        """Copy network/scenario attribute values from src to dst.
+
+        Args:
+            src: Emme scenario object or Emme Network object
+            dst: Emme scenario object or Emme Network object
+            src_attributes: dictionary of Emme network domain (NODE, LINK, TURN,
+                TRANSIT_LINE, TRANSIT_SEGMENT) to list of attribute names
+            dst_attributes: Optional, names to use for the attributes in the dst object,
+                if not specified these are the same as src_attributes
+        """
+        for domain, src_attrs in src_attributes.items():
+            if src_attrs:
+                dst_attrs = src_attrs
+                if dst_attributes is not None:
+                    dst_attrs = dst_attributes.get(domain, src_attrs)
+                values = src.get_attribute_values(domain, src_attrs)
+                dst.set_attribute_values(domain, dst_attrs, values)
 
     @staticmethod
     @_context
     def temp_attributes_and_restore(
-        scenario: EmmeScenario, attributes: List[List[str]]
+        scenario: "EmmeScenario", attributes: List[List[str]]
     ):
         """Create temp extra attribute and network field, backing up values and state, and restore.
@@ -218,36 +392,9 @@ def temp_attributes_and_restore(
         for domain, names, values in backup:
             scenario.set_attribute_values(domain, names, values)
 
-    @staticmethod
-    def copy_attr_values(
-        domain: str,
-        src: Union[EmmeScenario, EmmeNetwork],
-        dst: Union[EmmeScenario, EmmeNetwork],
-        src_names: List[str],
-        dst_names: List[str] = None,
-    ):
-        """Copy attribute values between Emme scenario (on disk) and network (in memory).
-
-        Args:
-            domain: attribute domain, one of "NODE", "LINK", "TURN", "TRANSIT_LINE",
-                "TRANSIT_SEGMENT"
-            src: source Emme scenario or network to load values from
-            dst: destination Emme scenario or network to save values to
-            src_names: names of the attributes for loading values
-            dst_names: optional, names of the attributes to save values as, defaults
-                to using the src_names if not specified
-
-        Returns:
-            Emme Modeller object, see Emme API Reference, Modeller section for details.
-        """
-        if dst_names is None:
-            dst_names = src_names
-        values = src.get_attribute_values(domain, src_names)
-        dst.set_attribute_values(domain, dst_names, values)
-
     def get_network(
-        self, scenario: EmmeScenario, attributes: Dict[str, List[str]] = None
-    ) -> EmmeNetwork:
+        self, scenario: "EmmeScenario", attributes: Dict[str, List[str]] = None
+    ) -> "EmmeNetwork":
         """Read partial Emme network from the scenario for the domains and attributes specified.
 
         Optimized load of network object from scenario (disk / emmebank) for only the
@@ -270,9 +417,7 @@
         network = scenario.get_partial_network(
             attributes.keys(), include_attributes=False
         )
-        for domain, attrs in attributes.items():
-            if attrs:
-                self.copy_attr_values(domain, scenario, network, attrs)
+        self.copy_attribute_values(scenario, network, attributes)
         return network
 
     @staticmethod
diff --git a/tm2py/emme/matrix.py b/tm2py/emme/matrix.py
index 9059cce8..829eddc7 100644
--- a/tm2py/emme/matrix.py
+++ b/tm2py/emme/matrix.py
@@ -10,22 +10,25 @@
 from disk.
 """
 
-from typing import List, Union, Dict
+from typing import Dict, List, Optional, Union
 
-from numpy import array as NumpyArray, resize
 import openmatrix as _omx
+from numpy import array as NumpyArray
+from numpy import exp, pad, resize
 
-from tm2py.emme.manager import EmmeScenario, EmmeMatrix
+from tm2py.emme.manager import EmmeMatrix, EmmeScenario
 
 
 class MatrixCache:
-    """Write through cache of Emme matrix data via Numpy arrays
-
-    Args:
-        scenario: reference scenario for the active Emmebank and matrix zone system
-    """
+    """Write-through cache of Emme matrix data via Numpy arrays."""
 
     def __init__(self, scenario: EmmeScenario):
+        """Constructor for MatrixCache class.
+ + Args: + scenario (EmmeScenario): EmmeScenario reference scenario for the active Emmebank + and matrix zone system + """ self._scenario = scenario self._emmebank = scenario.emmebank # mapping from matrix object to last read/write timestamp for cache invalidation @@ -33,6 +36,32 @@ def __init__(self, scenario: EmmeScenario): # cache of Emme matrix data, key: matrix object, value: numpy array of data self._data = {} + def get_or_init_matrix( + self, + name: str, + matrix_type: Optional[str] = "FULL", + description: Optional[str] = None, + ): + """Add matrix to emmebank if it doesn't exist and return as object. + + Args: + name: name of matrix - sans spaces + matrix_type: One of "ORIGIN","DESTINATION","FULL". Defaults to "FULL". + description: description of matrix, if not provided, will default to name. + """ + _matrix = self._emmebank.matrix(name) + if _matrix: + return _matrix + + _id = self._emmebank.available_matrix_identifier(matrix_type) + _matrix = self._emmebank.create_matrix(_id) + _matrix.name = name + if description is None: + description = name + _matrix.description = description + + return _matrix + def get_data(self, matrix: Union[str, EmmeMatrix]) -> NumpyArray: """Get Emme matrix data as numpy array. @@ -51,19 +80,39 @@ def get_data(self, matrix: Union[str, EmmeMatrix]) -> NumpyArray: self._data[matrix] = matrix.get_numpy_data(self._scenario.id) return self._data[matrix] - def set_data(self, matrix: Union[str, EmmeMatrix], data: NumpyArray): - """Set numpy array to Emme matrix (write through cache). + def set_data( + self, + matrix: Union[str, EmmeMatrix], + data: NumpyArray, + matrix_type: Optional[str] = "FULL", + description: Optional[str] = None, + ): + """Set numpy array to Emme matrix, filling zones and creating matrix in Emmebank if necessary. Args: matrix: Emme matrix object or unique name / ID for Emme matrix in Emmebank data: Numpy array, must match the scenario zone system + matrix_type: one of "ORIGIN","DESTINATION","FULL". Defaults to "FULL". + description: description of matrix, if not provided, will default to name. """ + # Reshape so that zone sizes match by padding external stations with zeros + num_zones = len(self._scenario.zone_numbers) + shape = data.shape + if shape[0] < num_zones: + padding = [(0, num_zones - dim_shape) for dim_shape in shape] + data = pad(data, padding) + if isinstance(matrix, str): - matrix = self._emmebank.matrix(matrix) + matrix = self.get_or_init_matrix( + matrix, matrix_type=matrix_type, description=description + ) + matrix.set_numpy_data(data, self._scenario.id) self._timestamps[matrix] = matrix.timestamp self._data[matrix] = data + return matrix + def clear(self): """Clear the cache.""" self._timestamps = {} @@ -76,19 +125,7 @@ class OMXManager: """Wrapper for the OMX interface to write from Emme matrices and numpy arrays. Write from Emmebank or Matrix Cache to OMX file, or read from OMX to Numpy. - Also supports with statement. - - Args: - file_path: path of OMX file - mode: "r", "w" or "a" - scenario: Emme scenario object for zone system and reference - Emmebank - omx_key: "ID_NAME", "NAME", "ID", format for generating - OMX key from Emme matrix data - matrix_cache: optional, Matrix Cache to support write data - from cache (instead of always reading from Emmmebank) - mask_max_value: optional, max value above which to write - zero instead ("big to zero" behavior) + Supports "with" statement. 
""" def __init__( @@ -99,12 +136,29 @@ def __init__( omx_key: str = "NAME", matrix_cache: MatrixCache = None, mask_max_value: float = None, + growth_factor: float = None ): # pylint: disable=R0913 + """The OMXManager constructor. + + Args: + file_path (str): path of OMX file + mode (str, optional): "r", "w" or "a". Defaults to "r". + scenario (EmmeScenario, optional): _description_. Defaults to None. + omx_key (str, optional): "ID_NAME", "NAME", "ID", format for generating + OMX key from Emme matrix data. Defaults to "NAME". + matrix_cache (MatrixCache, optional): Matrix Cache to support write data + from cache (instead of always reading from Emmmebank). Defaults to None. + mask_max_value (float, optional): max value above which to write + zero instead ("big to zero" behavior). Defaults to None. + growth_factor (float, optional): grow the value in each cell by a factor + (e.g. write out ivt skim in minute*100) + """ self._file_path = file_path self._mode = mode self._scenario = scenario self._omx_key = omx_key self._mask_max_value = mask_max_value + self._growth_factor = growth_factor self._omx_file = None self._emme_matrix_cache = matrix_cache self._read_cache = {} @@ -130,6 +184,7 @@ def close(self): self._read_cache = {} def __enter__(self): + """Allows for context-based usage using 'with' statement.""" self.open() if self._mode in ["a", "w"] and self._scenario is not None: try: @@ -141,6 +196,7 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + """Allows for context-based usage using 'with' statement.""" self.close() def write_matrices(self, matrices: List[Union[EmmeMatrix, str]]): @@ -158,7 +214,7 @@ def write_matrices(self, matrices: List[Union[EmmeMatrix, str]]): for matrix in matrices: self.write_matrix(matrix) - def write_matrix(self, matrix: [str, EmmeMatrix], name=None): + def write_matrix(self, matrix: Union[str, EmmeMatrix], name=None): """Write Emme matrix (as name or ID or Emme matrix object). Args: @@ -184,7 +240,7 @@ def write_matrix(self, matrix: [str, EmmeMatrix], name=None): n_zones = len(numpy_array) numpy_array = resize(numpy_array, (n_zones, 1)) attrs = {"description": matrix.description} - self.write_array(numpy_array, name, attrs) + self.write_array(numpy_array, name, "float64", attrs) def write_clipped_array( self, @@ -207,10 +263,10 @@ def write_clipped_array( numpy_array = numpy_array.clip(a_min, a_max) else: numpy_array = numpy_array.clip(a_min) - self.write_array(numpy_array, name, attrs) + self.write_array(numpy_array, name, "float64", attrs) def write_array( - self, numpy_array: NumpyArray, name: str, attrs: Dict[str, str] = None + self, numpy_array: NumpyArray, name: str, data_type: str = "float64", attrs: Dict[str, str] = None ): """Write array with name and optional attrs to OMX file. @@ -218,6 +274,7 @@ def write_array( numpy_array:: Numpy array name: name to use for the OMX key attrs: additional attribute key value pairs to write to OMX file + data_type: int, float32, float64, etc. 
""" if self._mode not in ["a", "w"]: raise Exception(f"{self._file_path}: open in read-only mode") @@ -228,7 +285,9 @@ def write_array( chunkshape = None if self._mask_max_value: numpy_array[numpy_array > self._mask_max_value] = 0 - numpy_array = numpy_array.astype(dtype="float64", copy=False) + if self._growth_factor: + numpy_array = numpy_array * self._growth_factor + numpy_array = numpy_array.astype(dtype=data_type, copy=False) self._omx_file.create_matrix( name, obj=numpy_array, chunkshape=chunkshape, attrs=attrs ) diff --git a/tm2py/emme/network.py b/tm2py/emme/network.py index 99f80709..d16d0cc5 100644 --- a/tm2py/emme/network.py +++ b/tm2py/emme/network.py @@ -1,18 +1,25 @@ """Module for Emme network calculations. Contains NetworkCalculator class to generate Emme format specifications for -the Network calculator.""" +the Network calculator. +""" +import heapq +from collections import defaultdict as _defaultdict +from typing import Any, Callable, Dict, List, Union -from typing import Union, Dict, List +from inro.emme.network.link import Link as EmmeNetworkLink +from inro.emme.network.node import Node as EmmeNetworkNode import tm2py.emme.manager as _manager EmmeScenario = _manager.EmmeScenario EmmeNetworkCalcSpecification = Dict[str, Union[str, Dict[str, str]]] +_INF = 1e400 + class NetworkCalculator: - """Simple wrapper interface to the Emme Network calculator + """Simple wrapper interface to the Emme Network calculator. Used to generate the standard network calculator specification (dictionary) from argument inputs. Useful when NOT (commonly) using selection or @@ -22,11 +29,14 @@ class NetworkCalculator: scenario: Emme scenario object """ - def __init__(self, scenario: EmmeScenario): + def __init__(self, controller, scenario: EmmeScenario): + """Constructor for NetworkCalculator class. + + Args: + scenario (EmmeScenario): Reference EmmeScenario object + """ self._scenario = scenario - emme_manager = _manager.EmmeManager() - modeller = emme_manager.modeller() - self._network_calc = modeller.tool( + self._network_calc = controller.emme_manager.modeller.tool( "inro.emme.network_calculation.network_calculator" ) self._specs = [] @@ -79,7 +89,7 @@ def add_calc( def run(self) -> List[Dict[str, float]]: """Run accumulated network calculations all at once. - Returns: + Returns A list of dictionary reports with min, max, average and sum of the calculation expression. See Emme help 'Network calculator' for more. """ @@ -107,3 +117,69 @@ def _format_spec( else: spec["selections"] = {"link": "all"} return spec + + +def find_path( + orig_node: EmmeNetworkNode, + dest_node: EmmeNetworkNode, + filter_func: Callable, + cost_func: Callable, +) -> List[EmmeNetworkLink]: + """Find and return the shortest path (sequence of links) between two nodes in Emme network. + Args: + orig_node: origin Emme node object + dest_node: desination Emme node object + filter_func: callable function which accepts an Emme network link and returns True if included and False + if excluded. E.g. lambda link: mode in link.modes + cost_func: callable function which accepts an Emme network link and returns the cost value for the link. 
+    """
+    visited = set()
+    visited_add = visited.add
+    costs = _defaultdict(lambda: _INF)
+    back_links = {}
+    heap = []
+    pop, push = heapq.heappop, heapq.heappush
+    outgoing = None
+    link_found = False
+    # initialize the heap with the filtered outgoing links of the origin node
+    for outgoing in orig_node.outgoing_links():
+        if filter_func(outgoing):
+            back_links[outgoing] = None
+            if outgoing.j_node == dest_node:
+                link_found = True
+                break
+            cost_to_link = cost_func(outgoing)
+            costs[outgoing] = cost_to_link
+            push(heap, (cost_to_link, outgoing))
+    try:
+        # Dijkstra-style search: expand the lowest-cost link until a link
+        # into the destination node is found
+        while not link_found:
+            cost_to_link, link = pop(heap)
+            if link in visited:
+                continue
+            visited_add(link)
+            for outgoing in link.j_node.outgoing_links():
+                if not filter_func(outgoing):
+                    continue
+                if outgoing in visited:
+                    continue
+                outgoing_cost = cost_to_link + cost_func(outgoing)
+                if outgoing_cost < costs[outgoing]:
+                    back_links[outgoing] = link
+                    costs[outgoing] = outgoing_cost
+                    push(heap, (outgoing_cost, outgoing))
+                if outgoing.j_node == dest_node:
+                    link_found = True
+                    break
+    except IndexError:
+        pass  # IndexError if heap is empty
+    if not link_found or outgoing is None:
+        raise NoPathFound(f"No path found between {orig_node} and {dest_node}")
+    # trace back from the final link to the origin to build the path
+    prev_link = outgoing
+    route = []
+    while prev_link:
+        route.append(prev_link)
+        prev_link = back_links[prev_link]
+    return list(reversed(route))
+
+
+class NoPathFound(Exception):
+    """Raised when no path can be found between the origin and destination nodes."""
diff --git a/tm2py/examples.py b/tm2py/examples.py
index c772488b..168bcac9 100644
--- a/tm2py/examples.py
+++ b/tm2py/examples.py
@@ -1,4 +1,4 @@
-"""Download and unzip examples for tm2py, used in tests"""
+"""Download and unzip examples for tm2py, used in tests."""

 import os

@@ -8,7 +8,7 @@
 _ROOT_DIR = r".."

 _DEFAULT_EXAMPLE_URL = (
-    r"https://mtcdrive.box.com/shared/static/3entr016e9teq2wt46x1os3fjqylfoge.zip"
+    r"https://mtcdrive.box.com/shared/static/8a71wv7jif0d844udf6gh902nuxd8bio.zip"
 )
 _DEFAULT_EXAMPLE_SUBDIR = r"examples"
 _DEFAULT_EXAMPLE_NAME = "UnionCity"
diff --git a/tm2py/logger.py b/tm2py/logger.py
index 922b581a..dd4e9678 100644
--- a/tm2py/logger.py
+++ b/tm2py/logger.py
@@ -1,75 +1,568 @@
-"""Logging module
+"""Logging module.
+
+Note the general definition of logging levels as used in tm2py:
+
+TRACE: highly detailed information which would rarely be of interest
+    except for detailed debugging by a developer
+DEBUG: diagnostic information which would generally be useful to a developer
+    debugging the model code; this may also be useful to a model operator in
+    some cases.
+DETAIL: more detail than would normally be of interest, but might be useful
+    to a model operator debugging a model run / data or understanding
+    model results
+INFO: detail which would normally be worth recording about the model operation
+STATUS: top-level, model-is-running type messages. There should be
+    relatively few of these, generally one per component, or one per time
+    period if the procedure is long.
+WARN: warning messages where there is a possibility of a problem
+ERROR: a problem which halts the operation but is within the expected scope
+    of failures, e.g. a file does not exist; includes general Python exceptions.
+FATAL: severe problem requiring operation to stop immediately.
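+
+Example usage (a sketch)::
+
+    logger = Logger.get_logger()
+    logger.status("Starting highway assignment")
+    logger.debug("Assignment specification written to file")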
""" + +from __future__ import annotations + +import functools +import os +import socket +import traceback as _traceback +from abc import abstractmethod from contextlib import contextmanager as _context from datetime import datetime -import functools +from pprint import pformat +from typing import TYPE_CHECKING, Union + +import requests +from typing_extensions import Literal, get_args + +if TYPE_CHECKING: + from tm2py.controller import RunController + +LogLevel = Literal[ + "TRACE", "DEBUG", "DETAIL", "INFO", "STATUS", "WARN", "ERROR", "FATAL" +] +LEVELS_STR_TO_INT = dict((k, i) for i, k in enumerate(get_args(LogLevel))) +LEVELS_INT_TO_STR = dict((i, k) for i, k in enumerate(get_args(LogLevel))) + +# pylint: disable=too-many-instance-attributes class Logger: - """Logger""" + """Logging of message text for display, text file, and Emme logbook, as well as notify to slack. + + The log message levels can be one of: + TRACE, DEBUG, DETAIL, INFO, STATUS, WARN, ERROR, FATAL + Which will filter all messages of that severity and higher. + See module note on use of descriptive level names. + + logger.log("a message") + with logger.log_start_end("Running a set of steps"): + logger.log("Message with timestamp") + logger.log("A debug message", level="DEBUG") + # equivalently, use the .debug: + logger.debug("Another debug message") + if logger.debug_enabled: + # only generate this report if logging DEBUG + logger.log("A debug report that takes time to produce", level="DEBUG") + logger.notify_slack("A slack message") + + Methods can also be decorated with LogStartEnd (see class for more). + + Note that the Logger should only be initialized once per model run. + In places where the controller is not available, the last Logger + initialized can be obtained from the class method get_logger:: + + logger = Logger.get_logger() + + Internal properties: + _log_cache: the LogCache object + _log_formatters: list of objects that format text and record, either + to file, display (print to screen) or cache for log on error + _use_emme_logbook: whether Emme logbook is enabled + _slack_notifier: SlackNotifier object for sending messages to slack + """ + + # used to cache last initialized Logger + _instance = None + + def __new__(cls, controller: RunController): + """Logger __new__ method override. TODO. + + Args: + controller (RunController): TODO. + """ + # pylint: disable=unused-argument + cls._instance = super(Logger, cls).__new__(cls) + return cls._instance + + def __init__(self, controller: RunController): + """Constructor for Logger object. - def __init__(self, controller): - super().__init__() - self._controller = controller + Args: + controller (RunController): Associated RunController instance. 
+ """ + self.controller = controller self._indentation = 0 + log_config = controller.config.logging + iter_component_level = log_config.iter_component_level or [] + iter_component_level = dict( + ((i, c), LEVELS_STR_TO_INT[l]) for i, c, l in iter_component_level + ) + display_logger = LogDisplay(LEVELS_STR_TO_INT[log_config.display_level]) + run_log_formatter = LogFile( + LEVELS_STR_TO_INT[log_config.run_file_level], + os.path.join(controller.run_dir, log_config.run_file_path), + ) + standard_log_formatter = LogFileLevelOverride( + LEVELS_STR_TO_INT[log_config.log_file_level], + os.path.join(controller.run_dir, log_config.log_file_path), + iter_component_level, + controller, + ) + self._log_cache = LogCache( + os.path.join(controller.run_dir, log_config.log_on_error_file_path) + ) + self._log_formatters = [ + display_logger, + run_log_formatter, + standard_log_formatter, + self._log_cache, + ] + + self._use_emme_logbook = self.controller.config.logging.use_emme_logbook + + self._slack_notifier = SlackNotifier(self) + + # open log formatters + for log_formatter in self._log_formatters: + if hasattr(log_formatter, "open"): + log_formatter.open() + + def __del__(self): + """ + Destructor for logger object + """ + for log_formatter in self._log_formatters: + if hasattr(log_formatter, "close"): + log_formatter.close() + + @classmethod + def get_logger(cls): + """Return the last initialized logger object.""" + return cls._instance - @staticmethod - def log(text: str, level: str = "INFO"): - """Placeholder logging method + def notify_slack(self, text: str): + """Send message to slack if enabled by config. Args: - text (str): text to log - level (str): logging level of the message text + text (str): text to send to slack """ - if level: - print(text) + if self.controller.config.logging.notify_slack: + self._slack_notifier.post_message(text) - def log_time(self, msg: str, level: str = "INFO", indent: bool = True): - """Log message with timestamp + def log(self, text: str, level: LogLevel = "INFO", indent: bool = True): + """Log text to file and display depending upon log level and config. Args: - msg (str): message text + text (str): text to log level (str): logging level - indent (bool): if true indent any messages based on the number of open contexts + indent (bool): if true indent text based on the number of open contexts + """ + timestamp = datetime.now().strftime("%d-%b-%Y (%H:%M:%S) ") + for log_formatter in self._log_formatters: + log_formatter.log(text, LEVELS_STR_TO_INT[level], indent, timestamp) + if self._use_emme_logbook and self.controller.has_emme: + self.controller.emme_manager.logbook_write(text) + + def trace(self, text: str, indent: bool = False): + """Log text with level=TRACE. + + Args: + text (str): text to log + indent (bool): if true indent text based on the number of open contexts + """ + self.log(text, "TRACE", indent) + + def debug(self, text: str, indent: bool = False): + """Log text with level=DEBUG. + + Args: + text (str): text to log + indent (bool): if true indent text based on the number of open contexts + """ + self.log(text, "DEBUG", indent) + + def detail(self, text: str, indent: bool = False): + """Log text with level=DETAIL. + + Args: + text (str): text to log + indent (bool): if true indent text based on the number of open contexts + """ + self.log(text, "DETAIL", indent) + + def info(self, text: str, indent: bool = False): + """Log text with level=INFO. 
+
+        Args:
+            text (str): text to log
+            indent (bool): if true indent text based on the number of open contexts
+        """
+        self.log(text, "INFO", indent)
+
+    def status(self, text: str, indent: bool = False):
+        """Log text with level=STATUS.
+
+        Args:
+            text (str): text to log
+            indent (bool): if true indent text based on the number of open contexts
+        """
+        self.log(text, "STATUS", indent)
+
+    def warn(self, text: str, indent: bool = False):
+        """Log text with level=WARN.
+
+        Args:
+            text (str): text to log
+            indent (bool): if true indent text based on the number of open contexts
+        """
+        self.log(text, "WARN", indent)
+
+    def error(self, text: str, indent: bool = False):
+        """Log text with level=ERROR.
+
+        Args:
+            text (str): text to log
+            indent (bool): if true indent text based on the number of open contexts
+        """
+        self.log(text, "ERROR", indent)
+
+    def fatal(self, text: str, indent: bool = False):
+        """Log text with level=FATAL.
+
+        Args:
+            text (str): text to log
+            indent (bool): if true indent text based on the number of open contexts
+        """
+        self.log(text, "FATAL", indent)
+
+    def log_time(self, text: str, level: LogLevel = "INFO", indent: bool = False):
+        """Log message with timestamp.
+
+        Args:
+            text (str): message text
+            level (str): logging level
+            indent (bool): if true indent text based on the number of open contexts
+        """
         timestamp = datetime.now().strftime("%d-%b-%Y (%H:%M:%S)")
         if indent:
             indent = " " * self._indentation
-            self.log(f"{timestamp}: {indent}{msg}", level)
+            self.log(f"{timestamp}: {indent}{text}", level)
         else:
-            self.log(f"{timestamp}: {msg}", level)
+            self.log(f"{timestamp}: {text}", level)

-    def log_start(self, msg: str, level: str = "INFO"):
+    def _log_start(self, text: str, level: LogLevel = "INFO"):
         """Log message with timestamp and 'Start'.

         Args:
-            msg (str): message text
+            text (str): message text
             level (str): logging level
         """
-        self.log_time(f"Start {msg}", level, indent=True)
-        self._indentation += 1
+        self.log(f"Start {text}", level, indent=True)
+        for log_formatter in self._log_formatters:
+            log_formatter.increase_indent(LEVELS_STR_TO_INT[level])

-    def log_end(self, msg: str, level: str = "INFO"):
+    def _log_end(self, text: str, level: LogLevel = "INFO"):
         """Log message with timestamp and 'End'.

         Args:
-            msg (str): message text
+            text (str): message text
             level (str): logging level
         """
-        self._indentation -= 1
-        self.log_time(f"End {msg}", level, indent=True)
+        for log_formatter in self._log_formatters:
+            log_formatter.decrease_indent(LEVELS_STR_TO_INT[level])
+        self.log(f"End {text}", level, indent=True)

     @_context
-    def log_start_end(self, msg: str, level: str = "INFO"):
+    def log_start_end(self, text: str, level: LogLevel = "STATUS"):
         """Use with 'with' statement to log the start and end time with message.

+        If using the Emme logbook (config.logging.use_emme_logbook is True), will
+        also create a logbook nest in the tree view using logbook_trace.
+
         Args:
-            msg (str): message text
+            text (str): message text
             level (str): logging level
         """
-        self.log_start(msg, level)
+        with self._skip_emme_logging():
+            self._log_start(text, level)
+        if self._use_emme_logbook:
+            with self.controller.emme_manager.logbook_trace(text):
+                yield
+        else:
+            yield
+        with self._skip_emme_logging():
+            self._log_end(text, level)
+
+    def log_dict(self, mapping: dict, level: LogLevel = "DEBUG"):
+        """Format dictionary to string and log as text."""
+        self.log(pformat(mapping, indent=1, width=120), level)
+
+    @_context
+    def _skip_emme_logging(self):
+        """Temporarily disable Emme logging (if enabled) and restore on exit.
+ + Intended use is with the log_start_end context and LogStartEnd decorator + to allow use of the Emme context without double logging of the + messages in the Emme logbook. + """ + self._use_emme_logbook, use_emme = False, self._use_emme_logbook yield - self.log_end(msg, level) + self._use_emme_logbook = use_emme + + def clear_msg_cache(self): + """Clear all log messages from cache.""" + self._log_cache.clear() + + @property + def debug_enabled(self) -> bool: + """Returns True if DEBUG is currently filtered for display or print to file. + + Can be used to enable / disable debug logging which may have a performance + impact. + """ + debug = LEVELS_STR_TO_INT["DEBUG"] + for log_formatter in self._log_formatters: + if log_formatter is not self._log_cache and log_formatter.level <= debug: + return True + return False + + @property + def trace_enabled(self) -> bool: + """Returns True if TRACE is currently filtered for display or print to file. + + Can be used to enable / disable trace logging which may have a performance + impact. + """ + trace = LEVELS_STR_TO_INT["TRACE"] + for log_formatter in self._log_formatters: + if log_formatter is not self._log_cache and log_formatter.level <= trace: + return True + return False + + +class LogFormatter: + """Base class for recording text to log. + + Properties: + indent: current indentation level for the LogFormatter + level: log filter level (as an int) + """ + + def __init__(self, level: int): + """Constructor for LogFormatter. + + Args: + level (int): log filter level (as an int) + """ + self._level = level + self.indent = 0 + + @property + def level(self): + """The current filter level for the LogFormatter.""" + return self._level + + def increase_indent(self, level: int): + """Increase current indent if the log level is filtered in.""" + if level >= self.level: + self.indent += 1 + + def decrease_indent(self, level: int): + """Decrease current indent if the log level is filtered in.""" + if level >= self.level: + self.indent -= 1 + + @abstractmethod + def log( + self, + text: str, + level: int, + indent: bool, + timestamp: Union[str, None], + ): + """Format and log message text. + + Args: + text (str): text to log + level (int): logging level + indent (bool): if true indent text based on the number of open contexts + timestamp (str): formatted datetime as a string or None + """ + + def _format_text( + self, + text: str, + level: int, + indent: bool, + timestamp: Union[str, None], + ): + """Format text for logging. + + Args: + text (str): text to format + level (int): logging level + indent (bool): if true indent text based on the number of open contexts and + timestamp width + timestamp (str): formatted datetime as a string or None for timestamp + """ + if timestamp is None: + timestamp = " " if indent else "" + if indent: + num_indents = self.indent + indent = " " * max(num_indents, 0) + else: + indent = "" + level_str = "{0:>6}".format(LEVELS_INT_TO_STR[level]) + return f"{timestamp}{level_str}: {indent}{text}" + + +class LogFile(LogFormatter): + """Format and write log text to file. + + Properties: + - level: the log level as an int + - file_path: the absolute file path to write to + """ + + def __init__(self, level: int, file_path: str): + """Constructor for LogFile object. + + Args: + level (int): the log level as an int. + file_path (str): the absolute file path to write to. 
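+
+        Example (a sketch)::
+
+            log_file = LogFile(LEVELS_STR_TO_INT["INFO"], "tm2py_run.log")
+            log_file.open()
+            log_file.log("text", LEVELS_STR_TO_INT["INFO"], False, None)
+            log_file.close()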
+        """
+        super().__init__(level)
+        self.file_path = file_path
+        self.log_file = None
+
+    def open(self):
+        """Open the log file for writing."""
+        self.log_file = open(self.file_path, "w", encoding="utf8")
+
+    def log(self, text: str, level: int, indent: bool, timestamp: Union[str, None]):
+        """Write text to the log file if the level is filtered in.
+
+        Note that log will not write anything until the file has been
+        opened with open().
+
+        Args:
+            text (str): text to log
+            level (int): logging level
+            indent (bool): if true indent text based on the number of open contexts
+            timestamp (str): formatted datetime as a string or None for timestamp
+        """
+        if level >= self.level and self.log_file is not None:
+            text = self._format_text(text, level, indent, timestamp)
+            self.log_file.write(f"{text}\n")
+            self.log_file.flush()
+
+    def close(self):
+        """Close the open log file."""
+        self.log_file.close()
+        self.log_file = None
+
+
+class LogFileLevelOverride(LogFile):
+    """Format and write log text to file, with level overrides by iteration and component.
+
+    Properties:
+        - level: the log level as an int
+        - file_path: the absolute file path to write to
+        - iter_component_level: mapping of (iteration, component) to log level override
+        - controller: reference to the RunController (for the current iter_component)
+    """
+
+    def __init__(self, level, file_path, iter_component_level, controller):
+        """Constructor for LogFileLevelOverride object.
+
+        Args:
+            level (int): default log filter level as an int.
+            file_path (str): the absolute file path to write to.
+            iter_component_level (dict): mapping of (iteration, component) keys
+                to log level overrides (as ints).
+            controller (RunController): reference to the run controller, used
+                to look up the current iteration and component.
+        """
+        super().__init__(level, file_path)
+        self.iter_component_level = iter_component_level
+        self.controller = controller
+
+    @property
+    def level(self):
+        """Current log level with iter_component_level config override."""
+        return self.iter_component_level.get(
+            self.controller.iter_component, self._level
+        )
+
+
+class LogDisplay(LogFormatter):
+    """Format and print log text to console / Notebook.
+
+    Properties:
+        - level: the log level as an int
+    """
+
+    def log(self, text: str, level: int, indent: bool, timestamp: Union[str, None]):
+        """Format and display text on screen (print).
+
+        Args:
+            text (str): text to log
+            level (int): logging level
+            indent (bool): if true indent text based on the number of open contexts
+            timestamp (str): formatted datetime as a string or None
+        """
+        if level >= self.level:
+            print(self._format_text(text, level, indent, timestamp))
+
+
+class LogCache(LogFormatter):
+    """Caches all messages for later recording to the log-on-error file.
+
+    Properties:
+        - file_path: the absolute file path to write to
+    """
+
+    def __init__(self, file_path: str):
+        """Constructor for LogCache object.
+
+        Args:
+            file_path (str): the absolute file path to write to.
+        """
+        super().__init__(level=0)
+        self.file_path = file_path
+        self._msg_cache = []
+
+    def open(self):
+        """Remove any existing log-on-error file at file_path."""
+        if os.path.exists(self.file_path):
+            os.remove(self.file_path)
+
+    def log(self, text: str, level: int, indent: bool, timestamp: Union[str, None]):
+        """Format and store text for later recording.
+
+        Args:
+            text (str): text to log
+            level (int): logging level
+            indent (bool): if true indent text based on the number of open contexts
+            timestamp (str): formatted datetime as a string or None
+        """
+        self._msg_cache.append(
+            (level, self._format_text(text, level, indent, timestamp))
+        )
+
+    def write_cache(self):
+        """Write all cached messages to the log-on-error file and clear the cache."""
+        with open(self.file_path, "w", encoding="utf8") as file:
+            for level, text in self._msg_cache:
+                file.write(f"{LEVELS_INT_TO_STR[level]:6} {text}\n")
+        self.clear()
+
+    def clear(self):
+        """Clear message cache."""
+        self._msg_cache = []


 # pylint: disable=too-few-public-methods
@@ -78,25 +571,115 @@ def log_start_end(self, msg: str, level: str = "INFO"):
 class LogStartEnd:
     """Log the start and end time with optional message.

-    Used as a Component method decorator. If msg is not provided a default message
-    is generated with the object class and method name.
+    Used as a Component method decorator. If text is not provided a default
+    message is generated with the object class and method name.

-    Args:
-        msg (str): message text to use in the start and end record
-        level (str): logging level
+    Example::
+
+        @LogStartEnd("Highway assignment and skims", level="STATUS")
+        def run(self):
+            pass
+
+    Properties:
+        text (str): message text to use in the start and end record.
+        level (str): logging level as a string.
     """

-    def __init__(self, msg: str = None, level: str = "INFO"):
-        self.msg = msg
+    def __init__(self, text: str = None, level: str = "INFO"):
+        """Constructor for LogStartEnd object.
+
+        Args:
+            text (str, optional): message text to use in the start and end record.
+                Defaults to None.
+            level (str, optional): logging level as a string. Defaults to "INFO".
+        """
+        self.text = text
         self.level = level

     def __call__(self, func):
+        """Wrap the decorated method to log start and end times when it is called.
+
+        Args:
+            func (Callable): the method to wrap.
+
+        Returns:
+            Callable: the wrapped method.
+        """
+
         @functools.wraps(func)
         def wrapper(obj, *args, **kwargs):
-            msg = self.msg or obj.__class__.__name__ + " " + func.__name__
-            obj.logger.log_start(msg, self.level)
-            value = func(obj, *args, **kwargs)
-            obj.logger.log_end(msg, self.level)
+            text = self.text or obj.__class__.__name__ + " " + func.__name__
+            with obj.logger.log_start_end(text, self.level):
+                value = func(obj, *args, **kwargs)
             return value

         return wrapper
+
+
+class SlackNotifier:
+    r"""Notify slack of model run status.
+
+    The slack channel can be input directly, or is configured via a text file found at
+    r"M:\Software\Slack\TravelModel_SlackWebhook.txt" (if on MTC server) or
+    r"C:\Software\Slack\TravelModel_SlackWebhook.txt" (if local).
+
+    Properties:
+        - logger (Logger): object for logging of trace messages
+        - slack_webhook_url (str): optional, url to use for sending the message to slack
+    """
+
+    def __init__(self, logger: Logger, slack_webhook_url: str = None):
+        r"""Constructor for SlackNotifier object.
+
+        Args:
+            logger (Logger): logger instance.
+            slack_webhook_url (str, optional): Slack webhook URL to post messages to.
+                Defaults to None, which is replaced by either:
+                - r"M:\Software\Slack\TravelModel_SlackWebhook.txt" (if on MTC server)
+                - r"C:\Software\Slack\TravelModel_SlackWebhook.txt" (otherwise)
+        """
+        self.logger = logger
+        if not logger.controller.config.logging.notify_slack:
+            self._slack_webhook_url = None
+            return
+        if slack_webhook_url is None:
+            hostname = socket.getfqdn()
+            if hostname.endswith(".mtc.ca.gov"):
+                slack_webhook_url_file = (
+                    r"M:\Software\Slack\TravelModel_SlackWebhook.txt"
+                )
+                self.logger.log(
+                    f"SlackNotifier running on mtc host; using {slack_webhook_url_file}",
+                    level="TRACE",
+                )
+            else:
+                slack_webhook_url_file = (
+                    r"C:\Software\Slack\TravelModel_SlackWebhook.txt"
+                )
+                self.logger.log(
+                    f"SlackNotifier running on non-mtc host; using {slack_webhook_url_file}",
+                    level="TRACE",
+                )
+            if os.path.isfile(slack_webhook_url_file):
+                with open(slack_webhook_url_file, "r", encoding="utf8") as url_file:
+                    # strip whitespace / trailing newline from the stored URL
+                    self._slack_webhook_url = url_file.read().strip()
+            else:
+                self._slack_webhook_url = None
+        else:
+            self._slack_webhook_url = slack_webhook_url
+        self.logger.log(
+            f"SlackNotifier using slack webhook url {self._slack_webhook_url}",
+            level="TRACE",
+        )
+
+    def post_message(self, text):
+        """Post text to the slack channel via the webhook if slack_webhook_url is found.
+
+        Args:
+            text: text message to send to slack
+        """
+        if self._slack_webhook_url is None:
+            return
+        headers = {"Content-type": "application/json"}
+        data = {"text": text}
+        self.logger.log(f"Sending message to slack: {text}", level="TRACE")
+        # timeout guards against hanging indefinitely on network issues
+        response = requests.post(
+            self._slack_webhook_url, headers=headers, json=data, timeout=30
+        )
+        self.logger.log(f"Receiving response: {response}", level="TRACE")
diff --git a/tm2py/matrix.py b/tm2py/matrix.py
new file mode 100644
index 00000000..279ca58a
--- /dev/null
+++ b/tm2py/matrix.py
@@ -0,0 +1,91 @@
+"""Helper functions for matrix calculations."""
+
+from typing import Collection, Dict, Mapping, Optional, Union
+
+import numpy as np
+import pandas as pd
+
+from tm2py.components.component import Subcomponent
+from tm2py.config import MatrixFactorConfig
+
+NumpyArray = np.ndarray
+
+
+def create_matrix_factors(
+    matrix_factors: Collection[MatrixFactorConfig],
+    default_matrix: NumpyArray,
+    periods: Optional[float] = None,
+) -> NumpyArray:
+    """Build a matrix of adjustment factors from a collection of MatrixFactorConfig.
+
+    Args:
+        matrix_factors (Collection[MatrixFactorConfig]): factor configurations
+            to apply.
+        default_matrix (NumpyArray): starting matrix of factors, updated in place.
+        periods (Optional[float]): number of periods over which to compound a
+            factor specified as a growth rate.
+
+    Returns:
+        NumpyArray: matrix of adjustment factors.
+    """
+    adj_matrix = default_matrix
+    for adj in matrix_factors:
+        if adj.factor is not None:
+            _i_factor = adj.factor / 2.00
+            _j_factor = adj.factor / 2.00
+        else:
+            _i_factor = adj.i_factor
+            _j_factor = adj.j_factor
+
+        if adj.as_growth_rate:
+            _i_factor = pow(_i_factor, periods)
+            _j_factor = pow(_j_factor, periods)
+
+        if not adj.zone_index:
+            adj_matrix *= _i_factor
+            adj_matrix *= _j_factor
+        else:
+            zone_index = [z for z in adj.zone_index if z < adj_matrix.shape[0]]
+            adj_matrix[zone_index, :] *= _i_factor
+            adj_matrix[:, zone_index] *= _j_factor
+
+    return adj_matrix
+
+
+def factor_matrix(
+    matrix: Union[NumpyArray, pd.DataFrame],
+    matrix_factors: Collection[MatrixFactorConfig],
+    periods: Optional[float] = None,
+    decimals: Optional[int] = 2,
+) -> Union[NumpyArray, pd.DataFrame]:
+    """Factor a matrix based on a collection of MatrixFactorConfig and return factored matrix.
+
+    Args:
+        matrix (Union[NumpyArray, pd.DataFrame]): A numpy array or pandas dataframe to factor.
+        matrix_factors (Collection[MatrixFactorConfig]): A collection of instances
+            of MatrixFactorConfig.
+        periods (Optional[float]): Time (usually in years) used to determine overall growth
+            from an annual growth rate. Required if matrix_factors contains annual_growth_rate.
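+        decimals (Optional[int]): Number of decimal places to round the factored
+            matrix to. Defaults to 2.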
+
+    Returns:
+        Union[NumpyArray, pd.DataFrame]: Matrix factored per matrix_factors
+        (compounded over periods for growth rates).
+    """
+
+    _default_matrix = np.ones(matrix.shape)
+
+    adj_matrix = create_matrix_factors(
+        matrix_factors,
+        _default_matrix,
+        periods,
+    )
+
+    return np.around(matrix * adj_matrix, decimals=decimals)
+
+
+def redim_matrix(matrix: NumpyArray, num_zones: int) -> NumpyArray:
+    """Pad numpy array with zeros to match expected square shape of zone x zone dimensions.
+
+    Args:
+        matrix: NumpyArray to redimension
+        num_zones: expected number of zones to redimension to
+    """
+    _shape = matrix.shape
+    if _shape < (num_zones, num_zones):
+        matrix = np.pad(
+            matrix, ((0, num_zones - _shape[0]), (0, num_zones - _shape[1]))
+        )
+    elif _shape > (num_zones, num_zones):
+        raise ValueError(
+            f"Provided matrix is larger ({_shape}) than the "
+            f"specified number of zones: {num_zones}"
+        )
+
+    return matrix
diff --git a/tm2py/omx.py b/tm2py/omx.py
new file mode 100644
index 00000000..f176ada5
--- /dev/null
+++ b/tm2py/omx.py
@@ -0,0 +1,100 @@
+"""Helper functions for reading from and writing to OMX matrix files."""
+
+from pathlib import Path
+from typing import Collection, Dict, Mapping, Union
+
+import numpy as np
+import openmatrix as _omx
+import pandas as pd
+
+NumpyArray = np.ndarray
+
+
+def omx_to_dict(
+    omx_filename: Union[str, Path],
+    matrices: Union[Mapping[str, str], Collection[str]] = None,
+) -> Dict[str, NumpyArray]:
+    """Read an OMX file and return a dictionary of matrix names mapped to NumpyArrays.
+
+    Args:
+        omx_filename (Union[str, Path]): Filename of OMX file.
+        matrices (Union[Mapping[str, str], Collection[str]], optional): Either a list
+            of matrix names to read or a dictionary mapping the output dictionary key
+            to the matrix name in the OMX file to map to it. Defaults to all in file.
+
+    Returns:
+        Dict[str, NumpyArray]: Dictionary of matrix names (or mapped keys) to
+            NumpyArrays read from the OMX file.
+    """
+
+    # if specified as a list, then turn into a dictionary
+    if isinstance(matrices, list):
+        _matrices = {m: m for m in matrices}
+    else:
+        _matrices = matrices
+
+    omx_file = _omx.open_file(omx_filename)
+
+    # check to make sure matrices are available in file
+    _avail_matrices = omx_file.list_matrices()
+    _req_matrices = list(_matrices.values()) if _matrices else None
+
+    if _req_matrices:
+        if not set(_req_matrices).issubset(set(_avail_matrices)):
+            raise ValueError(
+                f"Not all specified matrices ({_req_matrices}) found in omx file. "
+                f"Available matrices: {_avail_matrices}"
+            )
+    else:
+        _matrices = {m: m for m in _avail_matrices}
+
+    omx_dict = {key: omx_file[omx_name].read() for key, omx_name in _matrices.items()}
+    omx_file.close()
+    return omx_dict
+
+
+def df_to_omx(
+    df: pd.DataFrame,
+    matrix_dict: Mapping[str, str],
+    omx_filename: str,
+    orig_column: str = "ORIG",
+    dest_column: str = "DEST",
+):
+    """Export a dataframe to an OMX matrix file.
+
+    Args:
+        df (pd.DataFrame): DataFrame to export.
+        matrix_dict (Mapping[str, str]): Mapping of OMX matrix name to DF column name.
+        omx_filename (str): OMX file to write to.
+        orig_column (str, optional): Origin column name. Defaults to "ORIG".
+        dest_column (str, optional): Destination column name. Defaults to "DEST".
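+
+    Example (a sketch; assumes a long-format trips DataFrame)::
+
+        df_to_omx(trips_df, {"DA_TRIPS": "da_trips"}, "trips.omx")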
+ """ + df = df.reset_index() + + # Get all used Zone IDs to produce index and zone mapping in OMX file + zone_ids = sorted(set(df[orig_column]).union(set(df[dest_column]))) + num_zones = len(zone_ids) + + # Map zone id to zone index # + zone_map = dict((z, i) for i, z in enumerate(zone_ids)) + + # calculate omx index of entries in numpy array list + df["omx_idx"] = df.apply( + lambda r: zone_map[r[orig_column]] * num_zones + zone_map[r[dest_column]], + axis=1, + ) + + _omx_file = _omx.open_file(omx_filename, "w") + _omx_file.create_mapping("zone_number", zone_ids) + + try: + for _name, _df_col in matrix_dict.items(): + _array = np.zeros(shape=(num_zones, num_zones)) + np.put( + _array, + df["omx_idx"].to_numpy(), + df[_df_col].to_numpy(), + ) + + _omx_file.create_matrix(_name, obj=_array) + + # TODO add logging + finally: + _omx_file.close() diff --git a/tm2py/tools.py b/tm2py/tools.py index f90eab7e..c6c6320a 100644 --- a/tm2py/tools.py +++ b/tm2py/tools.py @@ -1,45 +1,20 @@ """Tools module for common resources / shared code and "utilities" in the tm2py package.""" -from contextlib import contextmanager as _context import multiprocessing import os import re -import urllib.request +import subprocess as _subprocess +import tempfile import urllib.error import urllib.parse +import urllib.request import zipfile +from collections import defaultdict as _defaultdict +from contextlib import contextmanager as _context +from itertools import product as _product +from math import ceil, sqrt +from typing import Any, Collection, Mapping, Union -from typing import Union - - -def parse_num_processors(value: Union[str, int, float]): - """Convert input value (parse if string) to number of processors. - Args: - value: an int, float or string; string value can be "X" or "MAX-X" - Returns: - An int of the number of processors to use - - Raises: - Exception: Input value exceeds number of available processors - Exception: Input value less than 1 processors - """ - max_processors = multiprocessing.cpu_count() - if isinstance(value, str): - result = value.upper() - if result == "MAX": - return max_processors - if re.match("^[0-9]+$", value): - return int(value) - result = re.split(r"^MAX[\s]*-[\s]*", result) - if len(result) == 2: - return max(max_processors - int(result[1]), 1) - raise Exception(f"Input value {value} is an int or string as 'MAX-X'") - - result = int(value) - if result > max_processors: - raise Exception(f"Input value {value} greater than available processors") - if result < 1: - raise Exception(f"Input value {value} less than 1 processors") - return value +import pandas as pd @_context @@ -60,26 +35,31 @@ def _urlopen(url): request = urllib.request.Request(url) # Handle Redirects using solution shown by user: metatoaster on StackOverflow # https://stackoverflow.com/questions/62384020/python-3-7-urllib-request-doesnt-follow-redirect-url + print(f"Opening URL: {url}") try: with urllib.request.urlopen(request) as response: + print(f"No redirects found.") yield response except urllib.error.HTTPError as error: - print("redirect error") + print("Redirect Error") if error.status != 307: raise ValueError(f"HTTP Error {error.status}") from error redirected_url = urllib.parse.urljoin(url, error.headers["Location"]) + print(f"Redirected to: {redirected_url}") with urllib.request.urlopen(redirected_url) as response: yield response def _download(url: str, target_destination: str): - """Download file with redirects (i.e. box) + """Download file with redirects (i.e. box). 
     Args:
         url (str): source URL to download data from
         target_destination (str): destination file path to save download
     """
     with _urlopen(url) as response:
+        # content-length may be missing from the response headers
+        total_length = int(response.headers.get("content-length") or 0)
+        print(f"Total Download Size: {total_length}")
         with open(target_destination, "wb") as out_file:
             out_file.write(response.read())

@@ -98,7 +78,7 @@ def _unzip(target_zip: str, target_dir: str):
 def download_unzip(
     url: str, out_base_dir: str, target_dir: str, zip_filename: str = "test_data.zip"
 ) -> None:
-    """Downloads and unzips a file from a URL. The zip file is removed after extraction.
+    """Download and unzip a file from a URL. The zip file is removed after extraction.

     Args:
         url (str): Full URL to download from.
@@ -113,3 +93,328 @@
     _download(url, target_zip)
     _unzip(target_zip, target_dir)
     os.remove(target_zip)
+
+
+@_context
+def temp_file(mode: str = "w+", prefix: str = "", suffix: str = ""):
+    """Temp file wrapper to return open file handle and named path.
+
+    A named temporary file (using mkstemp) with specified prefix and
+    suffix is created and opened with the specified mode. The file
+    handle and path are returned. The file is closed and deleted on exit.
+
+    Args:
+        mode: mode to open file, [rw][+][b]
+        prefix: optional text to start temp file name
+        suffix: optional text to end temp file name
+    """
+    file_ref, file_path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
+    file = os.fdopen(file_ref, mode=mode)
+    try:
+        yield file, file_path
+    finally:
+        if not file.closed:
+            file.close()
+        os.remove(file_path)
+
+
+def run_process(commands: Collection[str], name: str = ""):
+    """Run system level commands as blocking process and log output and error messages.
+
+    Args:
+        commands: list of one or more commands to execute
+        name: optional name to use for the temp bat file
+    """
+    # when merged with develop_logging branch can use get_logger
+    # logger = Logger.get_logger
+    logger = None
+    with temp_file("w", prefix=name, suffix=".bat") as (bat_file, bat_file_path):
+        bat_file.write("\n".join(commands))
+        bat_file.close()
+        if logger:
+            # temporary file to capture output error messages generated by Java
+            # Note: temp file created in the current working directory
+            with temp_file(mode="w+", suffix="_error.log") as (err_file, _):
+                try:
+                    output = _subprocess.check_output(
+                        bat_file_path, stderr=err_file, shell=True
+                    )
+                    logger.log(output.decode("utf-8"))
+                except _subprocess.CalledProcessError as error:
+                    logger.log(error.output)
+                    raise
+                finally:
+                    err_file.seek(0)
+                    error_msg = err_file.read()
+                    if error_msg:
+                        logger.log(error_msg)
+        else:
+            _subprocess.check_call(bat_file_path, shell=True)
+
+
+def interpolate_dfs(
+    df: pd.DataFrame,
+    ref_points: Collection[Union[float, int]],
+    target_point: Union[float, int],
+    ref_col_name: str = "ends_with",
+) -> pd.DataFrame:
+    """Interpolate for the model year assuming linear growth between the reference years.
+
+    Args:
+        df (pd.DataFrame): dataframe to interpolate on, with ref points contained in column
+            name per ref_col_name.
+        ref_points (Collection[Union[float, int]]): reference years to interpolate between
+        target_point (Union[float, int]): target year
+        ref_col_name (str, optional): column name to use for reference years.
+            Defaults to "ends_with".
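+
+    Returns:
+        pd.DataFrame: interpolated dataframe, with the reference-point suffixes
+            stripped from the column names.
+
+    Example (a sketch; assumes df has columns "hh_2015" and "hh_2035")::
+
+        df_2030 = interpolate_dfs(df, [2015, 2035], 2030)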
+    """
+    if ref_col_name not in ["ends_with"]:
+        raise NotImplementedError(f"{ref_col_name} not implemented")
+    if len(ref_points) != 2:
+        raise NotImplementedError(f"{ref_points} reference points not implemented")
+
+    _ref_points = list(map(int, ref_points))
+    _target_point = int(target_point)
+
+    _ref_points.sort()
+    _start_point, _end_point = _ref_points
+    if not _start_point <= _target_point <= _end_point:
+        raise ValueError(
+            f"Target Point: {_target_point} not within range of "
+            f"Reference Points: {_ref_points}"
+        )
+
+    _start_ref_df = df[[c for c in df.columns if c.endswith(f"{_start_point}")]].copy()
+    _end_ref_df = df[[c for c in df.columns if c.endswith(f"{_end_point}")]].copy()
+
+    if len(_start_ref_df.columns) != len(_end_ref_df.columns):
+        raise ValueError(
+            f"{_start_point} and {_end_point} have different number of columns:\n"
+            f"{_start_point} Columns: {_start_ref_df.columns}\n"
+            f"{_end_point} Columns: {_end_ref_df.columns}"
+        )
+
+    _start_ref_df.rename(
+        columns=lambda x: x.replace(f"_{_start_point}", ""), inplace=True
+    )
+    _end_ref_df.rename(columns=lambda x: x.replace(f"_{_end_point}", ""), inplace=True)
+    _scale_factor = float(_target_point - _start_point) / (_end_point - _start_point)
+
+    interpolated_df = (1 - _scale_factor) * _start_ref_df + _scale_factor * _end_ref_df
+
+    return interpolated_df
+
+
+def zonal_csv_to_matrices(
+    csv_file: str,
+    i_column: str = "ORIG",
+    j_column: str = "DEST",
+    value_columns: Collection[str] = ("VALUE",),
+    default_value: float = 0.0,
+    fill_zones: bool = False,
+    max_zone: int = None,
+    delimiter: str = ",",
+) -> Mapping[str, pd.DataFrame]:
+    """Read a CSV file with zonal data into a dictionary of dataframes.
+
+    Input CSV file should have a header row specifying the I, J, and Value column names.
+
+    Args:
+        csv_file (str): Path to the input CSV file.
+        i_column (str, optional): Name of I (origin) zone column. Defaults to "ORIG".
+        j_column (str, optional): Name of J (destination) zone column. Defaults to "DEST".
+        value_columns (Collection[str], optional): List of columns to turn into matrices.
+            Defaults to ("VALUE",).
+        default_value (float, optional): Value to fill empty cells with. Defaults to 0.0.
+        fill_zones (bool, optional): If true, will fill zones without values to max zone with
+            default value. Defaults to False.
+        max_zone (int, optional): If fill_zones is True, used to determine matrix size.
+            Defaults to max(I, J).
+        delimiter (str, optional): Input file delimiter. Defaults to ",".
+
+    Returns:
+        dict: Dictionary of Pandas dataframes with matrix names as keys.
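+
+    Example (a sketch; assumes demand.csv with ORIG, DEST and VALUE columns)::
+
+        matrices = zonal_csv_to_matrices("demand.csv", fill_zones=True, max_zone=10)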
+    """
+    # TODO Create a test
+    _df = pd.read_csv(csv_file, delimiter=delimiter)
+    _df_idx = _df.set_index([i_column, j_column])
+
+    # unstack each value column from a (I, J)-indexed series to an I x J dataframe
+    _dfs_dict = {
+        v: _df_idx[v].unstack(fill_value=default_value) for v in value_columns
+    }
+    if not fill_zones:
+        return _dfs_dict
+
+    if max_zone is None:
+        max_zone = _df[[i_column, j_column]].max().max()
+
+    _zone_list = list(range(1, max_zone + 1))
+    for v, _zonal_df in _dfs_dict.items():
+        # fill out the full zone system, padding missing zones with the default value
+        _dfs_dict[v] = _zonal_df.reindex(
+            index=_zone_list, columns=_zone_list, fill_value=default_value
+        )
+    return _dfs_dict
+
+
+def mocked_inro_context():
+    """Mocking of modules which need to be mocked for tests."""
+    import sys
+    from unittest.mock import MagicMock
+
+    sys.modules["inro.emme.database.emmebank"] = MagicMock()
+    sys.modules["inro.emme.database.emmebank.path"] = MagicMock(return_value=".")
+    sys.modules["inro.emme.network.link"] = MagicMock()
+    sys.modules["inro.emme.network.mode"] = MagicMock()
+    sys.modules["inro.emme.network.node"] = MagicMock()
+    sys.modules["inro.emme.network"] = MagicMock()
+    sys.modules["inro.emme.database.scenario"] = MagicMock()
+    sys.modules["inro.emme.database.matrix"] = MagicMock()
+    sys.modules["inro.emme.desktop.app"] = MagicMock()
+    sys.modules["inro"] = MagicMock()
+    sys.modules["inro.modeller"] = MagicMock()
+    sys.modules["tm2py.emme.manager.EmmeManager.project"] = MagicMock()
+    sys.modules["tm2py.emme.manager.EmmeManager.emmebank"] = MagicMock()
+    sys.modules["tm2py.emme.manager"] = MagicMock()
+
+
+def emme_context():
+    """Return True if Emme is installed, mocking the inro modules if not."""
+    import pkg_resources
+
+    _inro_package = "inro-emme"
+    _avail_packages = [pkg.key for pkg in pkg_resources.working_set]
+
+    if _inro_package not in _avail_packages:
+        print("Inro not found. Skipping inro setup.")
+        mocked_inro_context()
+        return False
+
+    import inro
+
+    if "MagicMock" in str(type(inro)):
+        return False
+
+    return True
+
+
+def parse_num_processors(value: Union[str, int, float]):
+    """Parse input value string "MAX-X" to number of available processors.
+
+    Used with Emme procedures (traffic and transit assignments, matrix
+    calculator, etc.) Does not raise any specific errors.
+
+    Args:
+        value: int, float or string; string value can be "X" or "MAX-X"
+    """
+    max_processors = multiprocessing.cpu_count()
+    if isinstance(value, str):
+        value = value.upper()
+        if value == "MAX":
+            return max_processors
+        if re.match("^[0-9]+$", value):
+            return int(value)
+        result = re.split(r"^MAX[\s]*-[\s]*", value)
+        if len(result) == 2:
+            return max(max_processors - int(result[1]), 1)
+        return int(value)
+    return int(value)
+
+
+class SpatialGridIndex:
+    """Simple spatial grid hash for fast (enough) nearest-neighbor and within-distance searches of points."""
+
+    def __init__(self, size: float):
+        """Constructor for SpatialGridIndex.
+
+        Args:
+            size: the size of the grid to use for the index, relative to the point coordinates
+        """
+        self._size = float(size)
+        self._grid_index = _defaultdict(list)
+
+    def insert(self, obj: Any, x: float, y: float):
+        """Add new obj with coordinates x and y.
+
+        Args:
+            obj: any python object, will be returned from search methods "nearest"
+                and "within_distance"
+            x: x-coordinate
+            y: y-coordinate
+        """
+        grid_x, grid_y = round(x / self._size), round(y / self._size)
+        self._grid_index[(grid_x, grid_y)].append((obj, x, y))
+
+    def nearest(self, x: float, y: float):
+        """Return the closest object in the index to the specified coordinates.
+
+        Args:
+            x: x-coordinate
+            y: y-coordinate
+        """
+        if len(self._grid_index) == 0:
+            raise Exception("SpatialGrid is empty.")
+
+        def calc_dist(x1, y1, x2, y2):
+            return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
+
+        grid_x, grid_y = round(x / self._size), round(y / self._size)
+        step = 0
+        done = False
+        found_items = []
+        # search expanding rings of grid tiles until at least one item is found,
+        # then search one more ring and return the closest of the items found
+        while not done:
+            search_offsets = list(range(-1 * step, step + 1))
+            search_offsets = _product(search_offsets, search_offsets)
+            items = []
+            for x_offset, y_offset in search_offsets:
+                if abs(x_offset) != step and abs(y_offset) != step:
+                    continue  # already checked this grid tile
+                items.extend(self._grid_index[grid_x + x_offset, grid_y + y_offset])
+            if found_items:
+                done = True
+            found_items.extend(items)
+            step += 1
+        min_dist = float("inf")
+        closest = None
+        for i, xi, yi in found_items:
+            dist = calc_dist(x, y, xi, yi)
+            if dist < min_dist:
+                closest = i
+                min_dist = dist
+        return closest
+
+    def within_distance(self, x: float, y: float, distance: float):
+        """Return all objects in the index within the distance of the specified coordinates.
+
+        Args:
+            x: x-coordinate
+            y: y-coordinate
+            distance: distance to search in point coordinate units
+        """
+
+        def point_in_circle(x1, y1, x2, y2, dist):
+            return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) <= dist
+
+        return self._get_items_on_grid(x, y, distance, point_in_circle)
+
+    def within_square(self, x: float, y: float, distance: float):
+        """Return all objects in the index within a square box distance of the specified coordinates.
+
+        Args:
+            x: x-coordinate
+            y: y-coordinate
+            distance: distance to search in point coordinate units
+        """
+
+        def point_in_box(x1, y1, x2, y2, dist):
+            return abs(x1 - x2) <= dist and abs(y1 - y2) <= dist
+
+        return self._get_items_on_grid(x, y, distance, point_in_box)
+
+    def _get_items_on_grid(self, x, y, distance, filter_func):
+        grid_x, grid_y = round(x / self._size), round(y / self._size)
+        num_search_grids = ceil(distance / self._size)
+        search_offsets = list(range(-1 * num_search_grids, num_search_grids + 1))
+        search_offsets = list(_product(search_offsets, search_offsets))
+        items = []
+        for x_offset, y_offset in search_offsets:
+            items.extend(self._grid_index[grid_x + x_offset, grid_y + y_offset])
+        filtered_items = [
+            i for i, xi, yi in items if filter_func(x, y, xi, yi, distance)
+        ]
+        return filtered_items