From 9fa8658abd0ae0df55d16617f09ac67b96da2054 Mon Sep 17 00:00:00 2001
From: Andrew Latham
Date: Fri, 22 Nov 2024 10:32:12 -0800
Subject: [PATCH] Updating tutorial

---
 .../template.spatiotemporal-checkpoint.ipynb | 33 +-
 Jupyter/.template.spatiotemporal.ipynb | 33 +-
 Jupyter/Doxyfile | 410 +++
 Jupyter/html/bc_s.png | Bin 0 -> 676 bytes
 Jupyter/html/bc_sd.png | Bin 0 -> 635 bytes
 Jupyter/html/clipboard.js | 61 +
 Jupyter/html/closed.png | Bin 0 -> 132 bytes
 Jupyter/html/doc.svg | 12 +
 Jupyter/html/docd.svg | 12 +
 Jupyter/html/doxygen.css | 2244 +++++++++++++++++
 Jupyter/html/doxygen.svg | 28 +
 Jupyter/html/doxygen_crawl.html | 20 +
 Jupyter/html/dynsections.js | 198 ++
 Jupyter/html/folderclosed.svg | 11 +
 Jupyter/html/folderclosedd.svg | 11 +
 Jupyter/html/folderopen.svg | 17 +
 Jupyter/html/folderopend.svg | 12 +
 Jupyter/html/images | 1 +
 Jupyter/html/index.html | 111 +
 Jupyter/html/jquery.js | 34 +
 Jupyter/html/menu.js | 134 +
 Jupyter/html/menudata.js | 29 +
 Jupyter/html/minus.svg | 8 +
 Jupyter/html/minusd.svg | 8 +
 Jupyter/html/nav_f.png | Bin 0 -> 153 bytes
 Jupyter/html/nav_fd.png | Bin 0 -> 169 bytes
 Jupyter/html/nav_g.png | Bin 0 -> 95 bytes
 Jupyter/html/nav_h.png | Bin 0 -> 98 bytes
 Jupyter/html/nav_hd.png | Bin 0 -> 114 bytes
 Jupyter/html/navtree.css | 149 ++
 Jupyter/html/notebook.html | 1010 ++++++++
 Jupyter/html/open.png | Bin 0 -> 123 bytes
 Jupyter/html/pages.html | 112 +
 Jupyter/html/plus.svg | 9 +
 Jupyter/html/plusd.svg | 9 +
 Jupyter/html/resize.js | 145 ++
 Jupyter/html/splitbar.png | Bin 0 -> 314 bytes
 Jupyter/html/splitbard.png | Bin 0 -> 282 bytes
 Jupyter/html/sync_off.png | Bin 0 -> 853 bytes
 Jupyter/html/sync_on.png | Bin 0 -> 845 bytes
 Jupyter/html/tab_a.png | Bin 0 -> 142 bytes
 Jupyter/html/tab_ad.png | Bin 0 -> 135 bytes
 Jupyter/html/tab_b.png | Bin 0 -> 169 bytes
 Jupyter/html/tab_bd.png | Bin 0 -> 173 bytes
 Jupyter/html/tab_h.png | Bin 0 -> 177 bytes
 Jupyter/html/tab_hd.png | Bin 0 -> 180 bytes
 Jupyter/html/tab_s.png | Bin 0 -> 184 bytes
 Jupyter/html/tab_sd.png | Bin 0 -> 188 bytes
 Jupyter/html/tabs.css | 1 +
 Jupyter/spatiotemporal-colab.ipynb | 1308 ++++++++++
 ...tiotemporal.ipynb => spatiotemporal.ipynb} | 512 ++--
 Jupyter/spatiotemporal.py | 583 +++++
 52 files changed, 7067 insertions(+), 198 deletions(-)
 create mode 100644 Jupyter/Doxyfile
 create mode 100644 Jupyter/html/bc_s.png
 create mode 100644 Jupyter/html/bc_sd.png
 create mode 100644 Jupyter/html/clipboard.js
 create mode 100644 Jupyter/html/closed.png
 create mode 100644 Jupyter/html/doc.svg
 create mode 100644 Jupyter/html/docd.svg
 create mode 100644 Jupyter/html/doxygen.css
 create mode 100644 Jupyter/html/doxygen.svg
 create mode 100644 Jupyter/html/doxygen_crawl.html
 create mode 100644 Jupyter/html/dynsections.js
 create mode 100644 Jupyter/html/folderclosed.svg
 create mode 100644 Jupyter/html/folderclosedd.svg
 create mode 100644 Jupyter/html/folderopen.svg
 create mode 100644 Jupyter/html/folderopend.svg
 create mode 120000 Jupyter/html/images
 create mode 100644 Jupyter/html/index.html
 create mode 100644 Jupyter/html/jquery.js
 create mode 100644 Jupyter/html/menu.js
 create mode 100644 Jupyter/html/menudata.js
 create mode 100644 Jupyter/html/minus.svg
 create mode 100644 Jupyter/html/minusd.svg
 create mode 100644 Jupyter/html/nav_f.png
 create mode 100644 Jupyter/html/nav_fd.png
 create mode 100644 Jupyter/html/nav_g.png
 create mode 100644 Jupyter/html/nav_h.png
 create mode 100644 Jupyter/html/nav_hd.png
 create mode 100644 Jupyter/html/navtree.css
 create mode 100644 Jupyter/html/notebook.html
 create mode 100644
Jupyter/html/open.png create mode 100644 Jupyter/html/pages.html create mode 100644 Jupyter/html/plus.svg create mode 100644 Jupyter/html/plusd.svg create mode 100644 Jupyter/html/resize.js create mode 100644 Jupyter/html/splitbar.png create mode 100644 Jupyter/html/splitbard.png create mode 100644 Jupyter/html/sync_off.png create mode 100644 Jupyter/html/sync_on.png create mode 100644 Jupyter/html/tab_a.png create mode 100644 Jupyter/html/tab_ad.png create mode 100644 Jupyter/html/tab_b.png create mode 100644 Jupyter/html/tab_bd.png create mode 100644 Jupyter/html/tab_h.png create mode 100644 Jupyter/html/tab_hd.png create mode 100644 Jupyter/html/tab_s.png create mode 100644 Jupyter/html/tab_sd.png create mode 100644 Jupyter/html/tabs.css create mode 100644 Jupyter/spatiotemporal-colab.ipynb rename Jupyter/{template.spatiotemporal.ipynb => spatiotemporal.ipynb} (83%) create mode 100755 Jupyter/spatiotemporal.py diff --git a/Jupyter/.ipynb_checkpoints/template.spatiotemporal-checkpoint.ipynb b/Jupyter/.ipynb_checkpoints/template.spatiotemporal-checkpoint.ipynb index c07d0fcdb..c39009d4f 100644 --- a/Jupyter/.ipynb_checkpoints/template.spatiotemporal-checkpoint.ipynb +++ b/Jupyter/.ipynb_checkpoints/template.spatiotemporal-checkpoint.ipynb @@ -81,6 +81,9 @@ "source": [ "# General imports for the tutorial\n", "import sys, os, glob, shutil\n", + "import IMP\n", + "import RMF\n", + "import IMP.rmf\n", "from IMP.spatiotemporal import prepare_protein_library\n", "import IMP.spatiotemporal as spatiotemporal\n", "from IMP.spatiotemporal import analysis\n", @@ -97,11 +100,11 @@ "source": [ "# parameters for prepare_protein_library:\n", "times = [\"0min\", \"1min\", \"2min\"]\n", - "exp_comp = {'A': '../../../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", - " 'B': '../../../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", - " 'C': '../../../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", + "exp_comp = {'A': '../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", + " 'B': '../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", + " 'C': '../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", "expected_subcomplexes = ['A', 'B', 'C']\n", - "template_topology = 'spatiotemporal_topology.txt'\n", + "template_topology = '../modeling/Heterogeneity/Heterogeneity_Modeling/spatiotemporal_topology.txt'\n", "template_dict = {'A': ['Ubi-E2-D3'], 'B': ['BMI-1'], 'C': ['E3-ubi-RING2']}\n", "nmodels = 3\n", "\n", @@ -633,7 +636,7 @@ "main_dir = os.getcwd()\n", "os.chdir(main_dir)\n", "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", - "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../../../modeling/Input_Information/gen_FCS/', custom_source_dir3='../../../modeling/Snapshots/Snapshots_Modeling/')\n", + "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../modeling/Input_Information/gen_FCS/', custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", "\n", "# then trajectory model is created based on the all copied data\n", "expected_subcomplexes = ['A', 'B', 'C']\n", @@ -805,10 +808,11 @@ " except Exception as e:\n", " print(f\"scoresA.txt and scoresB.txt cannot be copied. Try do do it manually. 
Reason for Error: {e}\")\n", "\n", + "os.chdir(main_dir)\n", "# copy all the relevant files\n", - "copy_files_for_data(state_dict, custom_source_dir1='../../../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", - " custom_source_dir2='../../../modeling/Input_Information/gen_FCS/',\n", - " custom_source_dir3='../../../modeling/Snapshots/Snapshots_Modeling/')\n", + "copy_files_for_data(state_dict, custom_source_dir1='../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", + " custom_source_dir2='../modeling/Input_Information/gen_FCS/',\n", + " custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", "\n", "# create two independent DAGs\n", "expected_subcomplexes = ['A', 'B', 'C']\n", @@ -824,7 +828,7 @@ "nodesA, graphA, graph_probA, graph_scoresA = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scoresA.log',\n", " output_dir=outputA,\n", - " spatio_temporal_rule=False,\n", + " spatio_temporal_rule=True,\n", " expected_subcomplexes=expected_subcomplexes,\n", " score_comp=True, exp_comp_map=exp_comp,\n", " draw_dag=False)\n", @@ -833,7 +837,7 @@ "nodesB, graphB, graph_probB, graph_scoresB = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scoresB.log',\n", " output_dir=outputB,\n", - " spatio_temporal_rule=False,\n", + " spatio_temporal_rule=True,\n", " expected_subcomplexes=expected_subcomplexes,\n", " score_comp=True, exp_comp_map=exp_comp,\n", " draw_dag=False)\n", @@ -996,7 +1000,7 @@ " \"\"\"\n", " # find folder with config files\n", " if custom_labeled_pdf:\n", - " _labeled_pdf = custom_data_folder\n", + " _labeled_pdf = custom_labeled_pdf\n", " else:\n", " _labeled_pdf = '../Trajectories_Modeling/output/labeled_pdf.txt'\n", "\n", @@ -1037,7 +1041,7 @@ "\n", "# 3b - comparison of the model to data used in modeling (copy number)\n", "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", - "forward_model_copy_number(expected_subcomplexes)\n", + "forward_model_copy_number(expected_subcomplexes,custom_labeled_pdf='output/labeled_pdf.txt')\n", "print(\"Step 3b: copy number validation IS COMPLETED\")\n", "print(\"\")\n", "print(\"\")" @@ -1099,7 +1103,7 @@ " if custom_path:\n", " sim_rmf = custom_path # option for custom path\n", " else:\n", - " sim_rmf = f\"../../../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", + " sim_rmf = f\"../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", "\n", " pdb_output = f\"snapshot{state}_{time}.pdb\" # define the output of converted .pdb file\n", "\n", @@ -1215,8 +1219,9 @@ "SAXS_output = \"./SAXS_comparison/\"\n", "os.makedirs(SAXS_output, exist_ok=True)\n", "os.chdir(SAXS_output)\n", + "model = IMP.Model()\n", "convert_rmfs(state_dict, model)\n", - "copy_SAXS_dat_files(custom_src_dir='../../../../modeling/Input_Information/gen_SAXS')\n", + "copy_SAXS_dat_files(custom_src_dir='../../modeling/Input_Information/gen_SAXS')\n", "process_foxs(state_dict)\n", "print(\"Step 4a: SAXS validation IS COMPLETED\")\n", "print(\"\")\n", diff --git a/Jupyter/.template.spatiotemporal.ipynb b/Jupyter/.template.spatiotemporal.ipynb index c07d0fcdb..c39009d4f 100644 --- a/Jupyter/.template.spatiotemporal.ipynb +++ b/Jupyter/.template.spatiotemporal.ipynb @@ -81,6 +81,9 @@ "source": [ "# General imports for the tutorial\n", "import sys, os, glob, shutil\n", + "import IMP\n", + "import RMF\n", + "import IMP.rmf\n", 
"from IMP.spatiotemporal import prepare_protein_library\n", "import IMP.spatiotemporal as spatiotemporal\n", "from IMP.spatiotemporal import analysis\n", @@ -97,11 +100,11 @@ "source": [ "# parameters for prepare_protein_library:\n", "times = [\"0min\", \"1min\", \"2min\"]\n", - "exp_comp = {'A': '../../../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", - " 'B': '../../../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", - " 'C': '../../../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", + "exp_comp = {'A': '../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", + " 'B': '../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", + " 'C': '../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", "expected_subcomplexes = ['A', 'B', 'C']\n", - "template_topology = 'spatiotemporal_topology.txt'\n", + "template_topology = '../modeling/Heterogeneity/Heterogeneity_Modeling/spatiotemporal_topology.txt'\n", "template_dict = {'A': ['Ubi-E2-D3'], 'B': ['BMI-1'], 'C': ['E3-ubi-RING2']}\n", "nmodels = 3\n", "\n", @@ -633,7 +636,7 @@ "main_dir = os.getcwd()\n", "os.chdir(main_dir)\n", "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", - "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../../../modeling/Input_Information/gen_FCS/', custom_source_dir3='../../../modeling/Snapshots/Snapshots_Modeling/')\n", + "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../modeling/Input_Information/gen_FCS/', custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", "\n", "# then trajectory model is created based on the all copied data\n", "expected_subcomplexes = ['A', 'B', 'C']\n", @@ -805,10 +808,11 @@ " except Exception as e:\n", " print(f\"scoresA.txt and scoresB.txt cannot be copied. Try do do it manually. 
Reason for Error: {e}\")\n", "\n", + "os.chdir(main_dir)\n", "# copy all the relevant files\n", - "copy_files_for_data(state_dict, custom_source_dir1='../../../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", - " custom_source_dir2='../../../modeling/Input_Information/gen_FCS/',\n", - " custom_source_dir3='../../../modeling/Snapshots/Snapshots_Modeling/')\n", + "copy_files_for_data(state_dict, custom_source_dir1='../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", + " custom_source_dir2='../modeling/Input_Information/gen_FCS/',\n", + " custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", "\n", "# create two independent DAGs\n", "expected_subcomplexes = ['A', 'B', 'C']\n", @@ -824,7 +828,7 @@ "nodesA, graphA, graph_probA, graph_scoresA = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scoresA.log',\n", " output_dir=outputA,\n", - " spatio_temporal_rule=False,\n", + " spatio_temporal_rule=True,\n", " expected_subcomplexes=expected_subcomplexes,\n", " score_comp=True, exp_comp_map=exp_comp,\n", " draw_dag=False)\n", @@ -833,7 +837,7 @@ "nodesB, graphB, graph_probB, graph_scoresB = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scoresB.log',\n", " output_dir=outputB,\n", - " spatio_temporal_rule=False,\n", + " spatio_temporal_rule=True,\n", " expected_subcomplexes=expected_subcomplexes,\n", " score_comp=True, exp_comp_map=exp_comp,\n", " draw_dag=False)\n", @@ -996,7 +1000,7 @@ " \"\"\"\n", " # find folder with config files\n", " if custom_labeled_pdf:\n", - " _labeled_pdf = custom_data_folder\n", + " _labeled_pdf = custom_labeled_pdf\n", " else:\n", " _labeled_pdf = '../Trajectories_Modeling/output/labeled_pdf.txt'\n", "\n", @@ -1037,7 +1041,7 @@ "\n", "# 3b - comparison of the model to data used in modeling (copy number)\n", "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", - "forward_model_copy_number(expected_subcomplexes)\n", + "forward_model_copy_number(expected_subcomplexes,custom_labeled_pdf='output/labeled_pdf.txt')\n", "print(\"Step 3b: copy number validation IS COMPLETED\")\n", "print(\"\")\n", "print(\"\")" @@ -1099,7 +1103,7 @@ " if custom_path:\n", " sim_rmf = custom_path # option for custom path\n", " else:\n", - " sim_rmf = f\"../../../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", + " sim_rmf = f\"../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", "\n", " pdb_output = f\"snapshot{state}_{time}.pdb\" # define the output of converted .pdb file\n", "\n", @@ -1215,8 +1219,9 @@ "SAXS_output = \"./SAXS_comparison/\"\n", "os.makedirs(SAXS_output, exist_ok=True)\n", "os.chdir(SAXS_output)\n", + "model = IMP.Model()\n", "convert_rmfs(state_dict, model)\n", - "copy_SAXS_dat_files(custom_src_dir='../../../../modeling/Input_Information/gen_SAXS')\n", + "copy_SAXS_dat_files(custom_src_dir='../../modeling/Input_Information/gen_SAXS')\n", "process_foxs(state_dict)\n", "print(\"Step 4a: SAXS validation IS COMPLETED\")\n", "print(\"\")\n", diff --git a/Jupyter/Doxyfile b/Jupyter/Doxyfile new file mode 100644 index 000000000..f407a1e11 --- /dev/null +++ b/Jupyter/Doxyfile @@ -0,0 +1,410 @@ +# Doxyfile 1.11.0 + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- 
+DOXYFILE_ENCODING = UTF-8 +PROJECT_NAME = "IMP Tutorial" +PROJECT_NUMBER = +PROJECT_BRIEF = +PROJECT_LOGO = +PROJECT_ICON = +OUTPUT_DIRECTORY = +CREATE_SUBDIRS = NO +CREATE_SUBDIRS_LEVEL = 8 +ALLOW_UNICODE_NAMES = NO +OUTPUT_LANGUAGE = English +BRIEF_MEMBER_DESC = YES +REPEAT_BRIEF = YES +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the +ALWAYS_DETAILED_SEC = NO +INLINE_INHERITED_MEMB = NO +FULL_PATH_NAMES = YES +STRIP_FROM_PATH = +STRIP_FROM_INC_PATH = +SHORT_NAMES = NO +JAVADOC_AUTOBRIEF = NO +JAVADOC_BANNER = NO +QT_AUTOBRIEF = NO +MULTILINE_CPP_IS_BRIEF = NO +PYTHON_DOCSTRING = YES +INHERIT_DOCS = YES +SEPARATE_MEMBER_PAGES = NO +TAB_SIZE = 4 +ALIASES = +OPTIMIZE_OUTPUT_FOR_C = NO +OPTIMIZE_OUTPUT_JAVA = NO +OPTIMIZE_FOR_FORTRAN = NO +OPTIMIZE_OUTPUT_VHDL = NO +OPTIMIZE_OUTPUT_SLICE = NO +EXTENSION_MAPPING = +MARKDOWN_SUPPORT = YES +TOC_INCLUDE_HEADINGS = 2 +MARKDOWN_ID_STYLE = DOXYGEN +AUTOLINK_SUPPORT = NO +BUILTIN_STL_SUPPORT = NO +CPP_CLI_SUPPORT = NO +SIP_SUPPORT = NO +IDL_PROPERTY_SUPPORT = YES +DISTRIBUTE_GROUP_DOC = NO +GROUP_NESTED_COMPOUNDS = NO +SUBGROUPING = YES +INLINE_GROUPED_CLASSES = NO +INLINE_SIMPLE_STRUCTS = NO +TYPEDEF_HIDES_STRUCT = NO +LOOKUP_CACHE_SIZE = 0 +NUM_PROC_THREADS = 1 +TIMESTAMP = NO +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- +EXTRACT_ALL = NO +EXTRACT_PRIVATE = NO +EXTRACT_PRIV_VIRTUAL = NO +EXTRACT_PACKAGE = NO +EXTRACT_STATIC = NO +EXTRACT_LOCAL_CLASSES = YES +EXTRACT_LOCAL_METHODS = NO +EXTRACT_ANON_NSPACES = NO +RESOLVE_UNNAMED_PARAMS = YES +HIDE_UNDOC_MEMBERS = NO +HIDE_UNDOC_CLASSES = NO +HIDE_FRIEND_COMPOUNDS = NO +HIDE_IN_BODY_DOCS = NO +INTERNAL_DOCS = NO +CASE_SENSE_NAMES = SYSTEM +HIDE_SCOPE_NAMES = NO +HIDE_COMPOUND_REFERENCE= NO +SHOW_HEADERFILE = YES +SHOW_INCLUDE_FILES = YES +SHOW_GROUPED_MEMB_INC = NO +FORCE_LOCAL_INCLUDES = NO +INLINE_INFO = YES +SORT_MEMBER_DOCS = YES +SORT_BRIEF_DOCS = NO +SORT_MEMBERS_CTORS_1ST = NO +SORT_GROUP_NAMES = NO +SORT_BY_SCOPE_NAME = NO +STRICT_PROTO_MATCHING = NO +GENERATE_TODOLIST = YES +GENERATE_TESTLIST = YES +GENERATE_BUGLIST = YES +GENERATE_DEPRECATEDLIST= YES +ENABLED_SECTIONS = +MAX_INITIALIZER_LINES = 30 +SHOW_USED_FILES = YES +SHOW_FILES = YES +SHOW_NAMESPACES = YES +FILE_VERSION_FILTER = +LAYOUT_FILE = "/Users/alatham/Documents/GitHub/IMP_Spatiotemporal_tutorial/support/tutorial_tools/doxygen/layout.xml" +CITE_BIB_FILES = +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- +QUIET = NO +WARNINGS = YES +WARN_IF_UNDOCUMENTED = YES +WARN_IF_DOC_ERROR = YES +WARN_IF_INCOMPLETE_DOC = YES +WARN_NO_PARAMDOC = NO +WARN_IF_UNDOC_ENUM_VAL = NO +WARN_AS_ERROR = NO +WARN_FORMAT = "$file:$line: $text" +WARN_LINE_FORMAT = "at line $line of file $file" +WARN_LOGFILE = +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- +INPUT = spatiotemporal.md +INPUT_ENCODING = UTF-8 +INPUT_FILE_ENCODING = +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cxxm \ + *.cpp \ + *.cppm \ + *.ccm \ + *.c++ \ + *.c++m \ + *.java \ + *.ii \ + *.ixx \ + 
*.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.ixx \ + *.l \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice +RECURSIVE = NO +EXCLUDE = +EXCLUDE_SYMLINKS = NO +EXCLUDE_PATTERNS = +EXCLUDE_SYMBOLS = +EXAMPLE_PATH = .. +EXAMPLE_PATTERNS = * +EXAMPLE_RECURSIVE = NO +IMAGE_PATH = . +INPUT_FILTER = +FILTER_PATTERNS = +FILTER_SOURCE_FILES = NO +FILTER_SOURCE_PATTERNS = +USE_MDFILE_AS_MAINPAGE = +FORTRAN_COMMENT_AFTER = 72 +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- +SOURCE_BROWSER = NO +INLINE_SOURCES = NO +STRIP_CODE_COMMENTS = YES +REFERENCED_BY_RELATION = NO +REFERENCES_RELATION = NO +REFERENCES_LINK_SOURCE = YES +SOURCE_TOOLTIPS = YES +USE_HTAGS = NO +VERBATIM_HEADERS = YES +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- +ALPHABETICAL_INDEX = YES +IGNORE_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- +GENERATE_HTML = YES +HTML_OUTPUT = html +HTML_FILE_EXTENSION = .html +HTML_HEADER = "/Users/alatham/Documents/GitHub/IMP_Spatiotemporal_tutorial/support/tutorial_tools/doxygen/header.html" +HTML_FOOTER = "/Users/alatham/Documents/GitHub/IMP_Spatiotemporal_tutorial/support/tutorial_tools/doxygen/footer.html" +HTML_STYLESHEET = +HTML_EXTRA_STYLESHEET = +HTML_EXTRA_FILES = +HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE_HUE = 220 +HTML_COLORSTYLE_SAT = 100 +HTML_COLORSTYLE_GAMMA = 80 +HTML_DYNAMIC_MENUS = YES +HTML_DYNAMIC_SECTIONS = NO +HTML_CODE_FOLDING = YES +HTML_COPY_CLIPBOARD = YES +HTML_PROJECT_COOKIE = +HTML_INDEX_NUM_ENTRIES = 100 +GENERATE_DOCSET = NO +DOCSET_FEEDNAME = "Doxygen generated docs" +DOCSET_FEEDURL = +DOCSET_BUNDLE_ID = org.doxygen.Project +DOCSET_PUBLISHER_ID = org.doxygen.Publisher +DOCSET_PUBLISHER_NAME = Publisher +GENERATE_HTMLHELP = NO +CHM_FILE = +HHC_LOCATION = +GENERATE_CHI = NO +CHM_INDEX_ENCODING = +BINARY_TOC = NO +TOC_EXPAND = NO +SITEMAP_URL = +GENERATE_QHP = NO +QCH_FILE = +QHP_NAMESPACE = org.doxygen.Project +QHP_VIRTUAL_FOLDER = doc +QHP_CUST_FILTER_NAME = +QHP_CUST_FILTER_ATTRS = +QHP_SECT_FILTER_ATTRS = +QHG_LOCATION = +GENERATE_ECLIPSEHELP = NO +ECLIPSE_DOC_ID = org.doxygen.Project +DISABLE_INDEX = NO +GENERATE_TREEVIEW = NO +FULL_SIDEBAR = NO +ENUM_VALUES_PER_LINE = 4 +TREEVIEW_WIDTH = 250 +EXT_LINKS_IN_WINDOW = NO +OBFUSCATE_EMAILS = YES +HTML_FORMULA_FORMAT = png +FORMULA_FONTSIZE = 10 +FORMULA_MACROFILE = +USE_MATHJAX = YES +MATHJAX_VERSION = MathJax_2 +MATHJAX_FORMAT = HTML-CSS +MATHJAX_RELPATH = //integrativemodeling.org/mathjax +MATHJAX_EXTENSIONS = +MATHJAX_CODEFILE = +SEARCHENGINE = NO +SERVER_BASED_SEARCH = NO +EXTERNAL_SEARCH = NO +SEARCHENGINE_URL = +SEARCHDATA_FILE = searchdata.xml +EXTERNAL_SEARCH_ID = +EXTRA_SEARCH_MAPPINGS = +#--------------------------------------------------------------------------- +# Configuration options related to the LaTeX output 
+#--------------------------------------------------------------------------- +GENERATE_LATEX = NO +LATEX_OUTPUT = latex +LATEX_CMD_NAME = +MAKEINDEX_CMD_NAME = makeindex +LATEX_MAKEINDEX_CMD = makeindex +COMPACT_LATEX = NO +PAPER_TYPE = a4 +EXTRA_PACKAGES = +LATEX_HEADER = +LATEX_FOOTER = +LATEX_EXTRA_STYLESHEET = +LATEX_EXTRA_FILES = +PDF_HYPERLINKS = YES +USE_PDFLATEX = YES +LATEX_BATCHMODE = NO +LATEX_HIDE_INDICES = NO +LATEX_BIB_STYLE = plain +LATEX_EMOJI_DIRECTORY = +#--------------------------------------------------------------------------- +# Configuration options related to the RTF output +#--------------------------------------------------------------------------- +GENERATE_RTF = NO +RTF_OUTPUT = rtf +COMPACT_RTF = NO +RTF_HYPERLINKS = NO +RTF_STYLESHEET_FILE = +RTF_EXTENSIONS_FILE = +RTF_EXTRA_FILES = +#--------------------------------------------------------------------------- +# Configuration options related to the man page output +#--------------------------------------------------------------------------- +GENERATE_MAN = NO +MAN_OUTPUT = man +MAN_EXTENSION = .3 +MAN_SUBDIR = +MAN_LINKS = NO +#--------------------------------------------------------------------------- +# Configuration options related to the XML output +#--------------------------------------------------------------------------- +GENERATE_XML = NO +XML_OUTPUT = xml +XML_PROGRAMLISTING = YES +XML_NS_MEMB_FILE_SCOPE = NO +#--------------------------------------------------------------------------- +# Configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- +GENERATE_DOCBOOK = NO +DOCBOOK_OUTPUT = docbook +#--------------------------------------------------------------------------- +# Configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- +GENERATE_AUTOGEN_DEF = NO +#--------------------------------------------------------------------------- +# Configuration options related to Sqlite3 output +#--------------------------------------------------------------------------- +GENERATE_SQLITE3 = NO +SQLITE3_OUTPUT = sqlite3 +SQLITE3_RECREATE_DB = YES +#--------------------------------------------------------------------------- +# Configuration options related to the Perl module output +#--------------------------------------------------------------------------- +GENERATE_PERLMOD = NO +PERLMOD_LATEX = NO +PERLMOD_PRETTY = YES +PERLMOD_MAKEVAR_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = NO +EXPAND_ONLY_PREDEF = NO +SEARCH_INCLUDES = YES +INCLUDE_PATH = +INCLUDE_FILE_PATTERNS = +PREDEFINED = +EXPAND_AS_DEFINED = +SKIP_FUNCTION_MACROS = YES +#--------------------------------------------------------------------------- +# Configuration options related to external references +#--------------------------------------------------------------------------- +TAGFILES = .cache/manual-2.21.0-tags.xml=https://integrativemodeling.org/2.21.0/doc/manual/ .cache/ref-2.21.0-tags.xml=https://integrativemodeling.org/2.21.0/doc/ref/ +GENERATE_TAGFILE = +ALLEXTERNALS = NO +EXTERNAL_GROUPS = YES +EXTERNAL_PAGES = YES +#--------------------------------------------------------------------------- +# Configuration options related to diagram generator tools 
+#--------------------------------------------------------------------------- +HIDE_UNDOC_RELATIONS = YES +HAVE_DOT = NO +DOT_NUM_THREADS = 0 +DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10" +DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10" +DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4" +DOT_FONTPATH = +CLASS_GRAPH = YES +COLLABORATION_GRAPH = YES +GROUP_GRAPHS = YES +UML_LOOK = NO +UML_LIMIT_NUM_FIELDS = 10 +DOT_UML_DETAILS = NO +DOT_WRAP_THRESHOLD = 17 +TEMPLATE_RELATIONS = NO +INCLUDE_GRAPH = YES +INCLUDED_BY_GRAPH = YES +CALL_GRAPH = NO +CALLER_GRAPH = NO +GRAPHICAL_HIERARCHY = YES +DIRECTORY_GRAPH = YES +DIR_GRAPH_MAX_DEPTH = 1 +DOT_IMAGE_FORMAT = png +INTERACTIVE_SVG = NO +DOT_PATH = +DOTFILE_DIRS = +DIA_PATH = +DIAFILE_DIRS = +PLANTUML_JAR_PATH = +PLANTUML_CFG_FILE = +PLANTUML_INCLUDE_PATH = +DOT_GRAPH_MAX_NODES = 50 +MAX_DOT_GRAPH_DEPTH = 0 +DOT_MULTI_TARGETS = NO +GENERATE_LEGEND = YES +DOT_CLEANUP = YES +MSCGEN_TOOL = +MSCFILE_DIRS = diff --git a/Jupyter/html/bc_s.png b/Jupyter/html/bc_s.png new file mode 100644 index 0000000000000000000000000000000000000000..224b29aa9847d5a4b3902efd602b7ddf7d33e6c2 GIT binary patch literal 676 zcmV;V0$crwP)y__>=_9%My z{n931IS})GlGUF8K#6VIbs%684A^L3@%PlP2>_sk`UWPq@f;rU*V%rPy_ekbhXT&s z(GN{DxFv}*vZp`F>S!r||M`I*nOwwKX+BC~3P5N3-)Y{65c;ywYiAh-1*hZcToLHK ztpl1xomJ+Yb}K(cfbJr2=GNOnT!UFA7Vy~fBz8?J>XHsbZoDad^8PxfSa0GDgENZS zuLCEqzb*xWX2CG*b&5IiO#NzrW*;`VC9455M`o1NBh+(k8~`XCEEoC1Ybwf;vr4K3 zg|EB<07?SOqHp9DhLpS&bzgo70I+ghB_#)K7H%AMU3v}xuyQq9&Bm~++VYhF09a+U zl7>n7Jjm$K#b*FONz~fj;I->Bf;ule1prFN9FovcDGBkpg>)O*-}eLnC{6oZHZ$o% zXKW$;0_{8hxHQ>l;_*HATI(`7t#^{$(zLe}h*mqwOc*nRY9=?Sx4OOeVIfI|0V(V2 zBrW#G7Ss9wvzr@>H*`r>zE z+e8bOBgqIgldUJlG(YUDviMB`9+DH8n-s9SXRLyJHO1!=wY^79WYZMTa(wiZ!zP66 zA~!21vmF3H2{ngD;+`6j#~6j;$*f*G_2ZD1E;9(yaw7d-QnSCpK(cR1zU3qU0000< KMNUMnLSTYoA~SLT literal 0 HcmV?d00001 diff --git a/Jupyter/html/bc_sd.png b/Jupyter/html/bc_sd.png new file mode 100644 index 0000000000000000000000000000000000000000..31ca888dc71049713b35c351933a8d0f36180bf1 GIT binary patch literal 635 zcmV->0)+jEP)Jwi0r1~gdSq#w{Bu1q z`craw(p2!hu$4C_$Oc3X(sI6e=9QSTwPt{G) z=htT&^~&c~L2~e{r5_5SYe7#Is-$ln>~Kd%$F#tC65?{LvQ}8O`A~RBB0N~`2M+waajO;5>3B&-viHGJeEK2TQOiPRa zfDKyqwMc4wfaEh4jt>H`nW_Zidwk@Bowp`}(VUaj-pSI(-1L>FJVsX}Yl9~JsqgsZ zUD9(rMwf23Gez6KPa|wwInZodP-2}9@fK0Ga_9{8SOjU&4l`pH4@qlQp83>>HT$xW zER^U>)MyV%t(Lu=`d=Y?{k1@}&r7ZGkFQ%z%N+sE9BtYjovzxyxCPxN6&@wLK{soQ zSmkj$aLI}miuE^p@~4}mg9OjDfGEkgY4~^XzLRUBB*O{+&vq<3v(E%+k_i%=`~j%{ Vj14gnt9}3g002ovPDHLkV1n!oC4m3{ literal 0 HcmV?d00001 diff --git a/Jupyter/html/clipboard.js b/Jupyter/html/clipboard.js new file mode 100644 index 000000000..42c1fb0e0 --- /dev/null +++ b/Jupyter/html/clipboard.js @@ -0,0 +1,61 @@ +/** + +The code below is based on the Doxygen Awesome project, see +https://github.com/jothepro/doxygen-awesome-css + +MIT License + +Copyright (c) 2021 - 2022 jothepro + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +*/ + +let clipboard_title = "Copy to clipboard" +let clipboard_icon = `` +let clipboard_successIcon = `` +let clipboard_successDuration = 1000 + +$(function() { + if(navigator.clipboard) { + const fragments = document.getElementsByClassName("fragment") + for(const fragment of fragments) { + const clipboard_div = document.createElement("div") + clipboard_div.classList.add("clipboard") + clipboard_div.innerHTML = clipboard_icon + clipboard_div.title = clipboard_title + $(clipboard_div).click(function() { + const content = this.parentNode.cloneNode(true) + // filter out line number and folded fragments from file listings + content.querySelectorAll(".lineno, .ttc, .foldclosed").forEach((node) => { node.remove() }) + let text = content.textContent + // remove trailing newlines and trailing spaces from empty lines + text = text.replace(/^\s*\n/gm,'\n').replace(/\n*$/,'') + navigator.clipboard.writeText(text); + this.classList.add("success") + this.innerHTML = clipboard_successIcon + window.setTimeout(() => { // switch back to normal icon after timeout + this.classList.remove("success") + this.innerHTML = clipboard_icon + }, clipboard_successDuration); + }) + fragment.insertBefore(clipboard_div, fragment.firstChild) + } + } +}) diff --git a/Jupyter/html/closed.png b/Jupyter/html/closed.png new file mode 100644 index 0000000000000000000000000000000000000000..98cc2c909da37a6df914fbf67780eebd99c597f5 GIT binary patch literal 132 zcmeAS@N?(olHy`uVBq!ia0vp^oFL4>1|%O$WD@{V-kvUwAr*{o@8{^CZMh(5KoB^r_<4^zF@3)Cp&&t3hdujKf f*?bjBoY!V+E))@{xMcbjXe@)LtDnm{r-UW|*e5JT literal 0 HcmV?d00001 diff --git a/Jupyter/html/doc.svg b/Jupyter/html/doc.svg new file mode 100644 index 000000000..0b928a531 --- /dev/null +++ b/Jupyter/html/doc.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/Jupyter/html/docd.svg b/Jupyter/html/docd.svg new file mode 100644 index 000000000..ac18b2755 --- /dev/null +++ b/Jupyter/html/docd.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/Jupyter/html/doxygen.css b/Jupyter/html/doxygen.css new file mode 100644 index 000000000..209912c71 --- /dev/null +++ b/Jupyter/html/doxygen.css @@ -0,0 +1,2244 @@ +/* The standard CSS for doxygen 1.11.0*/ + +html { +/* page base colors */ +--page-background-color: white; +--page-foreground-color: black; +--page-link-color: #3D578C; +--page-visited-link-color: #4665A2; + +/* index */ +--index-odd-item-bg-color: #F8F9FC; +--index-even-item-bg-color: white; +--index-header-color: black; +--index-separator-color: #A0A0A0; + +/* header */ +--header-background-color: #F9FAFC; +--header-separator-color: #C4CFE5; +--header-gradient-image: url('nav_h.png'); +--group-header-separator-color: #879ECB; +--group-header-color: #354C7B; +--inherit-header-color: gray; + +--footer-foreground-color: #2A3D61; +--footer-logo-width: 104px; +--citation-label-color: #334975; +--glow-color: cyan; + +--title-background-color: white; +--title-separator-color: #5373B4; +--directory-separator-color: #9CAFD4; +--separator-color: #4A6AAA; + +--blockquote-background-color: #F7F8FB; +--blockquote-border-color: 
#9CAFD4; + +--scrollbar-thumb-color: #9CAFD4; +--scrollbar-background-color: #F9FAFC; + +--icon-background-color: #728DC1; +--icon-foreground-color: white; +--icon-doc-image: url('doc.svg'); +--icon-folder-open-image: url('folderopen.svg'); +--icon-folder-closed-image: url('folderclosed.svg'); + +/* brief member declaration list */ +--memdecl-background-color: #F9FAFC; +--memdecl-separator-color: #DEE4F0; +--memdecl-foreground-color: #555; +--memdecl-template-color: #4665A2; + +/* detailed member list */ +--memdef-border-color: #A8B8D9; +--memdef-title-background-color: #E2E8F2; +--memdef-title-gradient-image: url('nav_f.png'); +--memdef-proto-background-color: #DFE5F1; +--memdef-proto-text-color: #253555; +--memdef-proto-text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); +--memdef-doc-background-color: white; +--memdef-param-name-color: #602020; +--memdef-template-color: #4665A2; + +/* tables */ +--table-cell-border-color: #2D4068; +--table-header-background-color: #374F7F; +--table-header-foreground-color: #FFFFFF; + +/* labels */ +--label-background-color: #728DC1; +--label-left-top-border-color: #5373B4; +--label-right-bottom-border-color: #C4CFE5; +--label-foreground-color: white; + +/** navigation bar/tree/menu */ +--nav-background-color: #F9FAFC; +--nav-foreground-color: #364D7C; +--nav-gradient-image: url('tab_b.png'); +--nav-gradient-hover-image: url('tab_h.png'); +--nav-gradient-active-image: url('tab_a.png'); +--nav-gradient-active-image-parent: url("../tab_a.png"); +--nav-separator-image: url('tab_s.png'); +--nav-breadcrumb-image: url('bc_s.png'); +--nav-breadcrumb-border-color: #C2CDE4; +--nav-splitbar-image: url('splitbar.png'); +--nav-font-size-level1: 13px; +--nav-font-size-level2: 10px; +--nav-font-size-level3: 9px; +--nav-text-normal-color: #283A5D; +--nav-text-hover-color: white; +--nav-text-active-color: white; +--nav-text-normal-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); +--nav-text-hover-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); +--nav-text-active-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); +--nav-menu-button-color: #364D7C; +--nav-menu-background-color: white; +--nav-menu-foreground-color: #555555; +--nav-menu-toggle-color: rgba(255, 255, 255, 0.5); +--nav-arrow-color: #9CAFD4; +--nav-arrow-selected-color: #9CAFD4; + +/* table of contents */ +--toc-background-color: #F4F6FA; +--toc-border-color: #D8DFEE; +--toc-header-color: #4665A2; +--toc-down-arrow-image: url("data:image/svg+xml;utf8,&%238595;"); + +/** search field */ +--search-background-color: white; +--search-foreground-color: #909090; +--search-magnification-image: url('mag.svg'); +--search-magnification-select-image: url('mag_sel.svg'); +--search-active-color: black; +--search-filter-background-color: #F9FAFC; +--search-filter-foreground-color: black; +--search-filter-border-color: #90A5CE; +--search-filter-highlight-text-color: white; +--search-filter-highlight-bg-color: #3D578C; +--search-results-foreground-color: #425E97; +--search-results-background-color: #EEF1F7; +--search-results-border-color: black; +--search-box-shadow: inset 0.5px 0.5px 3px 0px #555; + +/** code fragments */ +--code-keyword-color: #008000; +--code-type-keyword-color: #604020; +--code-flow-keyword-color: #E08000; +--code-comment-color: #800000; +--code-preprocessor-color: #806020; +--code-string-literal-color: #002080; +--code-char-literal-color: #008080; +--code-xml-cdata-color: black; +--code-vhdl-digit-color: #FF00FF; +--code-vhdl-char-color: #000000; +--code-vhdl-keyword-color: #700070; +--code-vhdl-logic-color: #FF0000; 
+--code-link-color: #4665A2; +--code-external-link-color: #4665A2; +--fragment-foreground-color: black; +--fragment-background-color: #FBFCFD; +--fragment-border-color: #C4CFE5; +--fragment-lineno-border-color: #00FF00; +--fragment-lineno-background-color: #E8E8E8; +--fragment-lineno-foreground-color: black; +--fragment-lineno-link-fg-color: #4665A2; +--fragment-lineno-link-bg-color: #D8D8D8; +--fragment-lineno-link-hover-fg-color: #4665A2; +--fragment-lineno-link-hover-bg-color: #C8C8C8; +--fragment-copy-ok-color: #2EC82E; +--tooltip-foreground-color: black; +--tooltip-background-color: white; +--tooltip-border-color: gray; +--tooltip-doc-color: grey; +--tooltip-declaration-color: #006318; +--tooltip-link-color: #4665A2; +--tooltip-shadow: 1px 1px 7px gray; +--fold-line-color: #808080; +--fold-minus-image: url('minus.svg'); +--fold-plus-image: url('plus.svg'); +--fold-minus-image-relpath: url('../../minus.svg'); +--fold-plus-image-relpath: url('../../plus.svg'); + +/** font-family */ +--font-family-normal: Roboto,sans-serif; +--font-family-monospace: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; +--font-family-nav: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; +--font-family-title: Tahoma,Arial,sans-serif; +--font-family-toc: Verdana,'DejaVu Sans',Geneva,sans-serif; +--font-family-search: Arial,Verdana,sans-serif; +--font-family-icon: Arial,Helvetica; +--font-family-tooltip: Roboto,sans-serif; + +/** special sections */ +--warning-color-bg: #f8d1cc; +--warning-color-hl: #b61825; +--warning-color-text: #75070f; +--note-color-bg: #faf3d8; +--note-color-hl: #f3a600; +--note-color-text: #5f4204; +--todo-color-bg: #e4f3ff; +--todo-color-hl: #1879C4; +--todo-color-text: #274a5c; +--test-color-bg: #e8e8ff; +--test-color-hl: #3939C4; +--test-color-text: #1a1a5c; +--deprecated-color-bg: #ecf0f3; +--deprecated-color-hl: #5b6269; +--deprecated-color-text: #43454a; +--bug-color-bg: #e4dafd; +--bug-color-hl: #5b2bdd; +--bug-color-text: #2a0d72; +--invariant-color-bg: #d8f1e3; +--invariant-color-hl: #44b86f; +--invariant-color-text: #265532; +} + +@media (prefers-color-scheme: dark) { + html:not(.dark-mode) { + color-scheme: dark; + +/* page base colors */ +--page-background-color: black; +--page-foreground-color: #C9D1D9; +--page-link-color: #90A5CE; +--page-visited-link-color: #A3B4D7; + +/* index */ +--index-odd-item-bg-color: #0B101A; +--index-even-item-bg-color: black; +--index-header-color: #C4CFE5; +--index-separator-color: #334975; + +/* header */ +--header-background-color: #070B11; +--header-separator-color: #141C2E; +--header-gradient-image: url('nav_hd.png'); +--group-header-separator-color: #283A5D; +--group-header-color: #90A5CE; +--inherit-header-color: #A0A0A0; + +--footer-foreground-color: #5B7AB7; +--footer-logo-width: 60px; +--citation-label-color: #90A5CE; +--glow-color: cyan; + +--title-background-color: #090D16; +--title-separator-color: #354C79; +--directory-separator-color: #283A5D; +--separator-color: #283A5D; + +--blockquote-background-color: #101826; +--blockquote-border-color: #283A5D; + +--scrollbar-thumb-color: #283A5D; +--scrollbar-background-color: #070B11; + +--icon-background-color: #334975; +--icon-foreground-color: #C4CFE5; +--icon-doc-image: url('docd.svg'); +--icon-folder-open-image: url('folderopend.svg'); +--icon-folder-closed-image: url('folderclosedd.svg'); + +/* brief member declaration list */ +--memdecl-background-color: #0B101A; +--memdecl-separator-color: #2C3F65; +--memdecl-foreground-color: #BBB; 
+--memdecl-template-color: #7C95C6; + +/* detailed member list */ +--memdef-border-color: #233250; +--memdef-title-background-color: #1B2840; +--memdef-title-gradient-image: url('nav_fd.png'); +--memdef-proto-background-color: #19243A; +--memdef-proto-text-color: #9DB0D4; +--memdef-proto-text-shadow: 0px 1px 1px rgba(0, 0, 0, 0.9); +--memdef-doc-background-color: black; +--memdef-param-name-color: #D28757; +--memdef-template-color: #7C95C6; + +/* tables */ +--table-cell-border-color: #283A5D; +--table-header-background-color: #283A5D; +--table-header-foreground-color: #C4CFE5; + +/* labels */ +--label-background-color: #354C7B; +--label-left-top-border-color: #4665A2; +--label-right-bottom-border-color: #283A5D; +--label-foreground-color: #CCCCCC; + +/** navigation bar/tree/menu */ +--nav-background-color: #101826; +--nav-foreground-color: #364D7C; +--nav-gradient-image: url('tab_bd.png'); +--nav-gradient-hover-image: url('tab_hd.png'); +--nav-gradient-active-image: url('tab_ad.png'); +--nav-gradient-active-image-parent: url("../tab_ad.png"); +--nav-separator-image: url('tab_sd.png'); +--nav-breadcrumb-image: url('bc_sd.png'); +--nav-breadcrumb-border-color: #2A3D61; +--nav-splitbar-image: url('splitbard.png'); +--nav-font-size-level1: 13px; +--nav-font-size-level2: 10px; +--nav-font-size-level3: 9px; +--nav-text-normal-color: #B6C4DF; +--nav-text-hover-color: #DCE2EF; +--nav-text-active-color: #DCE2EF; +--nav-text-normal-shadow: 0px 1px 1px black; +--nav-text-hover-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); +--nav-text-active-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); +--nav-menu-button-color: #B6C4DF; +--nav-menu-background-color: #05070C; +--nav-menu-foreground-color: #BBBBBB; +--nav-menu-toggle-color: rgba(255, 255, 255, 0.2); +--nav-arrow-color: #334975; +--nav-arrow-selected-color: #90A5CE; + +/* table of contents */ +--toc-background-color: #151E30; +--toc-border-color: #202E4A; +--toc-header-color: #A3B4D7; +--toc-down-arrow-image: url("data:image/svg+xml;utf8,&%238595;"); + +/** search field */ +--search-background-color: black; +--search-foreground-color: #C5C5C5; +--search-magnification-image: url('mag_d.svg'); +--search-magnification-select-image: url('mag_seld.svg'); +--search-active-color: #C5C5C5; +--search-filter-background-color: #101826; +--search-filter-foreground-color: #90A5CE; +--search-filter-border-color: #7C95C6; +--search-filter-highlight-text-color: #BCC9E2; +--search-filter-highlight-bg-color: #283A5D; +--search-results-background-color: #101826; +--search-results-foreground-color: #90A5CE; +--search-results-border-color: #7C95C6; +--search-box-shadow: inset 0.5px 0.5px 3px 0px #2F436C; + +/** code fragments */ +--code-keyword-color: #CC99CD; +--code-type-keyword-color: #AB99CD; +--code-flow-keyword-color: #E08000; +--code-comment-color: #717790; +--code-preprocessor-color: #65CABE; +--code-string-literal-color: #7EC699; +--code-char-literal-color: #00E0F0; +--code-xml-cdata-color: #C9D1D9; +--code-vhdl-digit-color: #FF00FF; +--code-vhdl-char-color: #C0C0C0; +--code-vhdl-keyword-color: #CF53C9; +--code-vhdl-logic-color: #FF0000; +--code-link-color: #79C0FF; +--code-external-link-color: #79C0FF; +--fragment-foreground-color: #C9D1D9; +--fragment-background-color: #090D16; +--fragment-border-color: #30363D; +--fragment-lineno-border-color: #30363D; +--fragment-lineno-background-color: black; +--fragment-lineno-foreground-color: #6E7681; +--fragment-lineno-link-fg-color: #6E7681; +--fragment-lineno-link-bg-color: #303030; +--fragment-lineno-link-hover-fg-color: #8E96A1; 
+--fragment-lineno-link-hover-bg-color: #505050; +--fragment-copy-ok-color: #0EA80E; +--tooltip-foreground-color: #C9D1D9; +--tooltip-background-color: #202020; +--tooltip-border-color: #C9D1D9; +--tooltip-doc-color: #D9E1E9; +--tooltip-declaration-color: #20C348; +--tooltip-link-color: #79C0FF; +--tooltip-shadow: none; +--fold-line-color: #808080; +--fold-minus-image: url('minusd.svg'); +--fold-plus-image: url('plusd.svg'); +--fold-minus-image-relpath: url('../../minusd.svg'); +--fold-plus-image-relpath: url('../../plusd.svg'); + +/** font-family */ +--font-family-normal: Roboto,sans-serif; +--font-family-monospace: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; +--font-family-nav: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; +--font-family-title: Tahoma,Arial,sans-serif; +--font-family-toc: Verdana,'DejaVu Sans',Geneva,sans-serif; +--font-family-search: Arial,Verdana,sans-serif; +--font-family-icon: Arial,Helvetica; +--font-family-tooltip: Roboto,sans-serif; + +/** special sections */ +--warning-color-bg: #2e1917; +--warning-color-hl: #ad2617; +--warning-color-text: #f5b1aa; +--note-color-bg: #3b2e04; +--note-color-hl: #f1b602; +--note-color-text: #ceb670; +--todo-color-bg: #163750; +--todo-color-hl: #1982D2; +--todo-color-text: #dcf0fa; +--test-color-bg: #121258; +--test-color-hl: #4242cf; +--test-color-text: #c0c0da; +--deprecated-color-bg: #2e323b; +--deprecated-color-hl: #738396; +--deprecated-color-text: #abb0bd; +--bug-color-bg: #2a2536; +--bug-color-hl: #7661b3; +--bug-color-text: #ae9ed6; +--invariant-color-bg: #303a35; +--invariant-color-hl: #76ce96; +--invariant-color-text: #cceed5; +}} +body { + background-color: var(--page-background-color); + color: var(--page-foreground-color); +} + +body, table, div, p, dl { + font-weight: 400; + font-size: 14px; + font-family: var(--font-family-normal); + line-height: 22px; +} + +/* @group Heading Levels */ + +.title { + font-family: var(--font-family-normal); + line-height: 28px; + font-size: 150%; + font-weight: bold; + margin: 10px 2px; +} + +h1.groupheader { + font-size: 150%; +} + +h2.groupheader { + border-bottom: 1px solid var(--group-header-separator-color); + color: var(--group-header-color); + font-size: 150%; + font-weight: normal; + margin-top: 1.75em; + padding-top: 8px; + padding-bottom: 4px; + width: 100%; +} + +h3.groupheader { + font-size: 100%; +} + +h1, h2, h3, h4, h5, h6 { + -webkit-transition: text-shadow 0.5s linear; + -moz-transition: text-shadow 0.5s linear; + -ms-transition: text-shadow 0.5s linear; + -o-transition: text-shadow 0.5s linear; + transition: text-shadow 0.5s linear; + margin-right: 15px; +} + +h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { + text-shadow: 0 0 15px var(--glow-color); +} + +dt { + font-weight: bold; +} + +p.startli, p.startdd { + margin-top: 2px; +} + +th p.starttd, th p.intertd, th p.endtd { + font-size: 100%; + font-weight: 700; +} + +p.starttd { + margin-top: 0px; +} + +p.endli { + margin-bottom: 0px; +} + +p.enddd { + margin-bottom: 4px; +} + +p.endtd { + margin-bottom: 2px; +} + +p.interli { +} + +p.interdd { +} + +p.intertd { +} + +/* @end */ + +caption { + font-weight: bold; +} + +span.legend { + font-size: 70%; + text-align: center; +} + +h3.version { + font-size: 90%; + text-align: center; +} + +div.navtab { + padding-right: 15px; + text-align: right; + line-height: 110%; +} + +div.navtab table { + border-spacing: 0; +} + +td.navtab { + padding-right: 6px; + padding-left: 6px; +} + +td.navtabHL { + background-image: 
var(--nav-gradient-active-image); + background-repeat:repeat-x; + padding-right: 6px; + padding-left: 6px; +} + +td.navtabHL a, td.navtabHL a:visited { + color: var(--nav-text-hover-color); + text-shadow: var(--nav-text-hover-shadow); +} + +a.navtab { + font-weight: bold; +} + +div.qindex{ + text-align: center; + width: 100%; + line-height: 140%; + font-size: 130%; + color: var(--index-separator-color); +} + +#main-menu a:focus { + outline: auto; + z-index: 10; + position: relative; +} + +dt.alphachar{ + font-size: 180%; + font-weight: bold; +} + +.alphachar a{ + color: var(--index-header-color); +} + +.alphachar a:hover, .alphachar a:visited{ + text-decoration: none; +} + +.classindex dl { + padding: 25px; + column-count:1 +} + +.classindex dd { + display:inline-block; + margin-left: 50px; + width: 90%; + line-height: 1.15em; +} + +.classindex dl.even { + background-color: var(--index-even-item-bg-color); +} + +.classindex dl.odd { + background-color: var(--index-odd-item-bg-color); +} + +@media(min-width: 1120px) { + .classindex dl { + column-count:2 + } +} + +@media(min-width: 1320px) { + .classindex dl { + column-count:3 + } +} + + +/* @group Link Styling */ + +a { + color: var(--page-link-color); + font-weight: normal; + text-decoration: none; +} + +.contents a:visited { + color: var(--page-visited-link-color); +} + +a:hover { + text-decoration: none; + background: linear-gradient(to bottom, transparent 0,transparent calc(100% - 1px), currentColor 100%); +} + +a:hover > span.arrow { + text-decoration: none; + background : var(--nav-background-color); +} + +a.el { + font-weight: bold; +} + +a.elRef { +} + +a.code, a.code:visited, a.line, a.line:visited { + color: var(--code-link-color); +} + +a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { + color: var(--code-external-link-color); +} + +a.code.hl_class { /* style for links to class names in code snippets */ } +a.code.hl_struct { /* style for links to struct names in code snippets */ } +a.code.hl_union { /* style for links to union names in code snippets */ } +a.code.hl_interface { /* style for links to interface names in code snippets */ } +a.code.hl_protocol { /* style for links to protocol names in code snippets */ } +a.code.hl_category { /* style for links to category names in code snippets */ } +a.code.hl_exception { /* style for links to exception names in code snippets */ } +a.code.hl_service { /* style for links to service names in code snippets */ } +a.code.hl_singleton { /* style for links to singleton names in code snippets */ } +a.code.hl_concept { /* style for links to concept names in code snippets */ } +a.code.hl_namespace { /* style for links to namespace names in code snippets */ } +a.code.hl_package { /* style for links to package names in code snippets */ } +a.code.hl_define { /* style for links to macro names in code snippets */ } +a.code.hl_function { /* style for links to function names in code snippets */ } +a.code.hl_variable { /* style for links to variable names in code snippets */ } +a.code.hl_typedef { /* style for links to typedef names in code snippets */ } +a.code.hl_enumvalue { /* style for links to enum value names in code snippets */ } +a.code.hl_enumeration { /* style for links to enumeration names in code snippets */ } +a.code.hl_signal { /* style for links to Qt signal names in code snippets */ } +a.code.hl_slot { /* style for links to Qt slot names in code snippets */ } +a.code.hl_friend { /* style for links to friend names in code snippets */ } +a.code.hl_dcop { /* style for links to 
KDE3 DCOP names in code snippets */ } +a.code.hl_property { /* style for links to property names in code snippets */ } +a.code.hl_event { /* style for links to event names in code snippets */ } +a.code.hl_sequence { /* style for links to sequence names in code snippets */ } +a.code.hl_dictionary { /* style for links to dictionary names in code snippets */ } + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +ul.check { + list-style:none; + text-indent: -16px; + padding-left: 38px; +} +li.unchecked:before { + content: "\2610\A0"; +} +li.checked:before { + content: "\2611\A0"; +} + +ol { + text-indent: 0px; +} + +ul { + text-indent: 0px; + overflow: visible; +} + +ul.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; + column-count: 3; + list-style-type: none; +} + +#side-nav ul { + overflow: visible; /* reset ul rule for scroll bar in GENERATE_TREEVIEW window */ +} + +#main-nav ul { + overflow: visible; /* reset ul rule for the navigation bar drop down lists */ +} + +.fragment { + text-align: left; + direction: ltr; + overflow-x: auto; + overflow-y: hidden; + position: relative; + min-height: 12px; + margin: 10px 0px; + padding: 10px 10px; + border: 1px solid var(--fragment-border-color); + border-radius: 4px; + background-color: var(--fragment-background-color); + color: var(--fragment-foreground-color); +} + +pre.fragment { + word-wrap: break-word; + font-size: 10pt; + line-height: 125%; + font-family: var(--font-family-monospace); +} + +.clipboard { + width: 24px; + height: 24px; + right: 5px; + top: 5px; + opacity: 0; + position: absolute; + display: inline; + overflow: auto; + fill: var(--fragment-foreground-color); + justify-content: center; + align-items: center; + cursor: pointer; +} + +.clipboard.success { + border: 1px solid var(--fragment-foreground-color); + border-radius: 4px; +} + +.fragment:hover .clipboard, .clipboard.success { + opacity: .28; +} + +.clipboard:hover, .clipboard.success { + opacity: 1 !important; +} + +.clipboard:active:not([class~=success]) svg { + transform: scale(.91); +} + +.clipboard.success svg { + fill: var(--fragment-copy-ok-color); +} + +.clipboard.success { + border-color: var(--fragment-copy-ok-color); +} + +div.line { + font-family: var(--font-family-monospace); + font-size: 13px; + min-height: 13px; + line-height: 1.2; + text-wrap: unrestricted; + white-space: -moz-pre-wrap; /* Moz */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: pre-wrap; /* CSS3 */ + word-wrap: break-word; /* IE 5.5+ */ + text-indent: -53px; + padding-left: 53px; + padding-bottom: 0px; + margin: 0px; + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +div.line:after { + content:"\000A"; + white-space: pre; +} + +div.line.glow { + background-color: var(--glow-color); + box-shadow: 0 0 10px var(--glow-color); +} + +span.fold { + margin-left: 5px; + margin-right: 1px; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; + display: inline-block; + width: 12px; + height: 12px; + background-repeat:no-repeat; + background-position:center; +} + 
+span.lineno { + padding-right: 4px; + margin-right: 9px; + text-align: right; + border-right: 2px solid var(--fragment-lineno-border-color); + color: var(--fragment-lineno-foreground-color); + background-color: var(--fragment-lineno-background-color); + white-space: pre; +} +span.lineno a, span.lineno a:visited { + color: var(--fragment-lineno-link-fg-color); + background-color: var(--fragment-lineno-link-bg-color); +} + +span.lineno a:hover { + color: var(--fragment-lineno-link-hover-fg-color); + background-color: var(--fragment-lineno-link-hover-bg-color); +} + +.lineno { + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +div.classindex ul { + list-style: none; + padding-left: 0; +} + +div.classindex span.ai { + display: inline-block; +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +body { + color: var(--page-foreground-color); + margin: 0; +} + +div.contents { + margin-top: 10px; + margin-left: 12px; + margin-right: 8px; +} + +p.formulaDsp { + text-align: center; +} + +img.dark-mode-visible { + display: none; +} +img.light-mode-visible { + display: none; +} + +img.formulaInl, img.inline { + vertical-align: middle; +} + +div.center { + text-align: center; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; +} + +div.center img { + border: 0px; +} + +address.footer { + text-align: right; + padding-right: 12px; +} + +img.footer { + border: 0px; + vertical-align: middle; + width: var(--footer-logo-width); +} + +.compoundTemplParams { + color: var(--memdecl-template-color); + font-size: 80%; + line-height: 120%; +} + +/* @group Code Colorization */ + +span.keyword { + color: var(--code-keyword-color); +} + +span.keywordtype { + color: var(--code-type-keyword-color); +} + +span.keywordflow { + color: var(--code-flow-keyword-color); +} + +span.comment { + color: var(--code-comment-color); +} + +span.preprocessor { + color: var(--code-preprocessor-color); +} + +span.stringliteral { + color: var(--code-string-literal-color); +} + +span.charliteral { + color: var(--code-char-literal-color); +} + +span.xmlcdata { + color: var(--code-xml-cdata-color); +} + +span.vhdldigit { + color: var(--code-vhdl-digit-color); +} + +span.vhdlchar { + color: var(--code-vhdl-char-color); +} + +span.vhdlkeyword { + color: var(--code-vhdl-keyword-color); +} + +span.vhdllogic { + color: var(--code-vhdl-logic-color); +} + +blockquote { + background-color: var(--blockquote-background-color); + border-left: 2px solid var(--blockquote-border-color); + margin: 0 24px 0 4px; + padding: 0 12px 0 16px; +} + +/* @end */ + +td.tiny { + font-size: 75%; +} + +.dirtab { + padding: 4px; + border-collapse: collapse; + border: 1px solid var(--table-cell-border-color); +} + +th.dirtab { + background-color: var(--table-header-background-color); + color: var(--table-header-foreground-color); + font-weight: bold; +} + +hr { + height: 0px; + border: none; + border-top: 1px solid var(--separator-color); +} + +hr.footer { + height: 1px; +} + +/* @group Member Descriptions */ + +table.memberdecls { + border-spacing: 0px; + padding: 0px; +} + +.memberdecls td, .fieldtable tr { + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, 
box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +.memberdecls td.glow, .fieldtable tr.glow { + background-color: var(--glow-color); + box-shadow: 0 0 15px var(--glow-color); +} + +.mdescLeft, .mdescRight, +.memItemLeft, .memItemRight, +.memTemplItemLeft, .memTemplItemRight, .memTemplParams { + background-color: var(--memdecl-background-color); + border: none; + margin: 4px; + padding: 1px 0 0 8px; +} + +.mdescLeft, .mdescRight { + padding: 0px 8px 4px 8px; + color: var(--memdecl-foreground-color); +} + +.memSeparator { + border-bottom: 1px solid var(--memdecl-separator-color); + line-height: 1px; + margin: 0px; + padding: 0px; +} + +.memItemLeft, .memTemplItemLeft { + white-space: nowrap; +} + +.memItemRight, .memTemplItemRight { + width: 100%; +} + +.memTemplParams { + color: var(--memdecl-template-color); + white-space: nowrap; + font-size: 80%; +} + +/* @end */ + +/* @group Member Details */ + +/* Styles for detailed member documentation */ + +.memtitle { + padding: 8px; + border-top: 1px solid var(--memdef-border-color); + border-left: 1px solid var(--memdef-border-color); + border-right: 1px solid var(--memdef-border-color); + border-top-right-radius: 4px; + border-top-left-radius: 4px; + margin-bottom: -1px; + background-image: var(--memdef-title-gradient-image); + background-repeat: repeat-x; + background-color: var(--memdef-title-background-color); + line-height: 1.25; + font-weight: 300; + float:left; +} + +.permalink +{ + font-size: 65%; + display: inline-block; + vertical-align: middle; +} + +.memtemplate { + font-size: 80%; + color: var(--memdef-template-color); + font-weight: normal; + margin-left: 9px; +} + +.mempage { + width: 100%; +} + +.memitem { + padding: 0; + margin-bottom: 10px; + margin-right: 5px; + -webkit-transition: box-shadow 0.5s linear; + -moz-transition: box-shadow 0.5s linear; + -ms-transition: box-shadow 0.5s linear; + -o-transition: box-shadow 0.5s linear; + transition: box-shadow 0.5s linear; + display: table !important; + width: 100%; +} + +.memitem.glow { + box-shadow: 0 0 15px var(--glow-color); +} + +.memname { + font-weight: 400; + margin-left: 6px; +} + +.memname td { + vertical-align: bottom; +} + +.memproto, dl.reflist dt { + border-top: 1px solid var(--memdef-border-color); + border-left: 1px solid var(--memdef-border-color); + border-right: 1px solid var(--memdef-border-color); + padding: 6px 0px 6px 0px; + color: var(--memdef-proto-text-color); + font-weight: bold; + text-shadow: var(--memdef-proto-text-shadow); + background-color: var(--memdef-proto-background-color); + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + border-top-right-radius: 4px; +} + +.overload { + font-family: var(--font-family-monospace); + font-size: 65%; +} + +.memdoc, dl.reflist dd { + border-bottom: 1px solid var(--memdef-border-color); + border-left: 1px solid var(--memdef-border-color); + border-right: 1px solid var(--memdef-border-color); + padding: 6px 10px 2px 10px; + border-top-width: 0; + background-image:url('nav_g.png'); + background-repeat:repeat-x; + background-color: var(--memdef-doc-background-color); + /* opera specific markup */ + border-bottom-left-radius: 4px; + border-bottom-right-radius: 4px; + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + /* firefox specific markup */ + -moz-border-radius-bottomleft: 4px; + -moz-border-radius-bottomright: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 
5px 5px 5px; + /* webkit specific markup */ + -webkit-border-bottom-left-radius: 4px; + -webkit-border-bottom-right-radius: 4px; + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); +} + +dl.reflist dt { + padding: 5px; +} + +dl.reflist dd { + margin: 0px 0px 10px 0px; + padding: 5px; +} + +.paramkey { + text-align: right; +} + +.paramtype { + white-space: nowrap; + padding: 0px; + padding-bottom: 1px; +} + +.paramname { + white-space: nowrap; + padding: 0px; + padding-bottom: 1px; + margin-left: 2px; +} + +.paramname em { + color: var(--memdef-param-name-color); + font-style: normal; + margin-right: 1px; +} + +.paramname .paramdefval { + font-family: var(--font-family-monospace); +} + +.params, .retval, .exception, .tparams { + margin-left: 0px; + padding-left: 0px; +} + +.params .paramname, .retval .paramname, .tparams .paramname, .exception .paramname { + font-weight: bold; + vertical-align: top; +} + +.params .paramtype, .tparams .paramtype { + font-style: italic; + vertical-align: top; +} + +.params .paramdir, .tparams .paramdir { + font-family: var(--font-family-monospace); + vertical-align: top; +} + +table.mlabels { + border-spacing: 0px; +} + +td.mlabels-left { + width: 100%; + padding: 0px; +} + +td.mlabels-right { + vertical-align: bottom; + padding: 0px; + white-space: nowrap; +} + +span.mlabels { + margin-left: 8px; +} + +span.mlabel { + background-color: var(--label-background-color); + border-top:1px solid var(--label-left-top-border-color); + border-left:1px solid var(--label-left-top-border-color); + border-right:1px solid var(--label-right-bottom-border-color); + border-bottom:1px solid var(--label-right-bottom-border-color); + text-shadow: none; + color: var(--label-foreground-color); + margin-right: 4px; + padding: 2px 3px; + border-radius: 3px; + font-size: 7pt; + white-space: nowrap; + vertical-align: middle; +} + + + +/* @end */ + +/* these are for tree view inside a (index) page */ + +div.directory { + margin: 10px 0px; + border-top: 1px solid var(--directory-separator-color); + border-bottom: 1px solid var(--directory-separator-color); + width: 100%; +} + +.directory table { + border-collapse:collapse; +} + +.directory td { + margin: 0px; + padding: 0px; + vertical-align: top; +} + +.directory td.entry { + white-space: nowrap; + padding-right: 6px; + padding-top: 3px; +} + +.directory td.entry a { + outline:none; +} + +.directory td.entry a img { + border: none; +} + +.directory td.desc { + width: 100%; + padding-left: 6px; + padding-right: 6px; + padding-top: 3px; + border-left: 1px solid rgba(0,0,0,0.05); +} + +.directory tr.odd { + padding-left: 6px; + background-color: var(--index-odd-item-bg-color); +} + +.directory tr.even { + padding-left: 6px; + background-color: var(--index-even-item-bg-color); +} + +.directory img { + vertical-align: -30%; +} + +.directory .levels { + white-space: nowrap; + width: 100%; + text-align: right; + font-size: 9pt; +} + +.directory .levels span { + cursor: pointer; + padding-left: 2px; + padding-right: 2px; + color: var(--page-link-color); +} + +.arrow { + color: var(--nav-arrow-color); + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + cursor: pointer; + font-size: 80%; + display: inline-block; + width: 16px; + height: 22px; +} + +.icon { + font-family: var(--font-family-icon); + line-height: normal; + font-weight: bold; + font-size: 12px; + height: 14px; + width: 16px; + display: inline-block; + background-color: var(--icon-background-color); + 
color: var(--icon-foreground-color); + text-align: center; + border-radius: 4px; + margin-left: 2px; + margin-right: 2px; +} + +.icona { + width: 24px; + height: 22px; + display: inline-block; +} + +.iconfopen { + width: 24px; + height: 18px; + margin-bottom: 4px; + background-image:var(--icon-folder-open-image); + background-repeat: repeat-y; + vertical-align:top; + display: inline-block; +} + +.iconfclosed { + width: 24px; + height: 18px; + margin-bottom: 4px; + background-image:var(--icon-folder-closed-image); + background-repeat: repeat-y; + vertical-align:top; + display: inline-block; +} + +.icondoc { + width: 24px; + height: 18px; + margin-bottom: 4px; + background-image:var(--icon-doc-image); + background-position: 0px -4px; + background-repeat: repeat-y; + vertical-align:top; + display: inline-block; +} + +/* @end */ + +div.dynheader { + margin-top: 8px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +address { + font-style: normal; + color: var(--footer-foreground-color); +} + +table.doxtable caption { + caption-side: top; +} + +table.doxtable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.doxtable td, table.doxtable th { + border: 1px solid var(--table-cell-border-color); + padding: 3px 7px 2px; +} + +table.doxtable th { + background-color: var(--table-header-background-color); + color: var(--table-header-foreground-color); + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +table.fieldtable { + margin-bottom: 10px; + border: 1px solid var(--memdef-border-color); + border-spacing: 0px; + border-radius: 4px; + box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); +} + +.fieldtable td, .fieldtable th { + padding: 3px 7px 2px; +} + +.fieldtable td.fieldtype, .fieldtable td.fieldname { + white-space: nowrap; + border-right: 1px solid var(--memdef-border-color); + border-bottom: 1px solid var(--memdef-border-color); + vertical-align: top; +} + +.fieldtable td.fieldname { + padding-top: 3px; +} + +.fieldtable td.fielddoc { + border-bottom: 1px solid var(--memdef-border-color); +} + +.fieldtable td.fielddoc p:first-child { + margin-top: 0px; +} + +.fieldtable td.fielddoc p:last-child { + margin-bottom: 2px; +} + +.fieldtable tr:last-child td { + border-bottom: none; +} + +.fieldtable th { + background-image: var(--memdef-title-gradient-image); + background-repeat:repeat-x; + background-color: var(--memdef-title-background-color); + font-size: 90%; + color: var(--memdef-proto-text-color); + padding-bottom: 4px; + padding-top: 5px; + text-align:left; + font-weight: 400; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom: 1px solid var(--memdef-border-color); +} + + +.tabsearch { + top: 0px; + left: 10px; + height: 36px; + background-image: var(--nav-gradient-image); + z-index: 101; + overflow: hidden; + font-size: 13px; +} + +.navpath ul +{ + font-size: 11px; + background-image: var(--nav-gradient-image); + background-repeat:repeat-x; + background-position: 0 -5px; + height:30px; + line-height:30px; + color:var(--nav-text-normal-color); + border:solid 1px var(--nav-breadcrumb-border-color); + overflow:hidden; + margin:0px; + padding:0px; +} + +.navpath li +{ + list-style-type:none; + float:left; + padding-left:10px; + padding-right:15px; + background-image:var(--nav-breadcrumb-image); + background-repeat:no-repeat; + background-position:right; + color: var(--nav-foreground-color); +} + +.navpath 
li.navelem a +{ + height:32px; + display:block; + outline: none; + color: var(--nav-text-normal-color); + font-family: var(--font-family-nav); + text-shadow: var(--nav-text-normal-shadow); + text-decoration: none; +} + +.navpath li.navelem a:hover +{ + color: var(--nav-text-hover-color); + text-shadow: var(--nav-text-hover-shadow); +} + +.navpath li.footer +{ + list-style-type:none; + float:right; + padding-left:10px; + padding-right:15px; + background-image:none; + background-repeat:no-repeat; + background-position:right; + color: var(--footer-foreground-color); + font-size: 8pt; +} + + +div.summary +{ + float: right; + font-size: 8pt; + padding-right: 5px; + width: 50%; + text-align: right; +} + +div.summary a +{ + white-space: nowrap; +} + +table.classindex +{ + margin: 10px; + white-space: nowrap; + margin-left: 3%; + margin-right: 3%; + width: 94%; + border: 0; + border-spacing: 0; + padding: 0; +} + +div.ingroups +{ + font-size: 8pt; + width: 50%; + text-align: left; +} + +div.ingroups a +{ + white-space: nowrap; +} + +div.header +{ + background-image: var(--header-gradient-image); + background-repeat:repeat-x; + background-color: var(--header-background-color); + margin: 0px; + border-bottom: 1px solid var(--header-separator-color); +} + +div.headertitle +{ + padding: 5px 5px 5px 10px; +} + +.PageDocRTL-title div.headertitle { + text-align: right; + direction: rtl; +} + +dl { + padding: 0 0 0 0; +} + +/* + +dl.section { + margin-left: 0px; + padding-left: 0px; +} + +dl.note { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #D0C000; +} + +dl.warning, dl.attention, dl.important { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #FF0000; +} + +dl.pre, dl.post, dl.invariant { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #00D000; +} + +dl.deprecated { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #505050; +} + +dl.todo { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #00C0E0; +} + +dl.test { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #3030E0; +} + +dl.bug { + margin-left: -7px; + padding-left: 3px; + border-left: 4px solid; + border-color: #C08050; +} + +*/ + +dl.bug dt a, dl.deprecated dt a, dl.todo dt a, dl.test a { + font-weight: bold !important; +} + +dl.warning, dl.attention, dl.important, dl.note, dl.deprecated, dl.bug, +dl.invariant, dl.pre, dl.post, dl.todo, dl.test, dl.remark { + padding: 10px; + margin: 10px 0px; + overflow: hidden; + margin-left: 0; + border-radius: 4px; +} + +dl.section dd { + margin-bottom: 2px; +} + +dl.warning, dl.attention, dl.important { + background: var(--warning-color-bg); + border-left: 8px solid var(--warning-color-hl); + color: var(--warning-color-text); +} + +dl.warning dt, dl.attention dt, dl.important dt { + color: var(--warning-color-hl); +} + +dl.note, dl.remark { + background: var(--note-color-bg); + border-left: 8px solid var(--note-color-hl); + color: var(--note-color-text); +} + +dl.note dt, dl.remark dt { + color: var(--note-color-hl); +} + +dl.todo { + background: var(--todo-color-bg); + border-left: 8px solid var(--todo-color-hl); + color: var(--todo-color-text); +} + +dl.todo dt { + color: var(--todo-color-hl); +} + +dl.test { + background: var(--test-color-bg); + border-left: 8px solid var(--test-color-hl); + color: var(--test-color-text); +} + +dl.test dt { + color: var(--test-color-hl); +} + +dl.bug dt a { + 
color: var(--bug-color-hl) !important; +} + +dl.bug { + background: var(--bug-color-bg); + border-left: 8px solid var(--bug-color-hl); + color: var(--bug-color-text); +} + +dl.bug dt a { + color: var(--bug-color-hl) !important; +} + +dl.deprecated { + background: var(--deprecated-color-bg); + border-left: 8px solid var(--deprecated-color-hl); + color: var(--deprecated-color-text); +} + +dl.deprecated dt a { + color: var(--deprecated-color-hl) !important; +} + +dl.note dd, dl.warning dd, dl.pre dd, dl.post dd, +dl.remark dd, dl.attention dd, dl.important dd, dl.invariant dd, +dl.bug dd, dl.deprecated dd, dl.todo dd, dl.test dd { + margin-inline-start: 0px; +} + +dl.invariant, dl.pre, dl.post { + background: var(--invariant-color-bg); + border-left: 8px solid var(--invariant-color-hl); + color: var(--invariant-color-text); +} + +dl.invariant dt, dl.pre dt, dl.post dt { + color: var(--invariant-color-hl); +} + + +#projectrow +{ + height: 56px; +} + +#projectlogo +{ + text-align: center; + vertical-align: bottom; + border-collapse: separate; +} + +#projectlogo img +{ + border: 0px none; +} + +#projectalign +{ + vertical-align: middle; + padding-left: 0.5em; +} + +#projectname +{ + font-size: 200%; + font-family: var(--font-family-title); + margin: 0px; + padding: 2px 0px; +} + +#projectbrief +{ + font-size: 90%; + font-family: var(--font-family-title); + margin: 0px; + padding: 0px; +} + +#projectnumber +{ + font-size: 50%; + font-family: 50% var(--font-family-title); + margin: 0px; + padding: 0px; +} + +#titlearea +{ + padding: 0px; + margin: 0px; + width: 100%; + border-bottom: 1px solid var(--title-separator-color); + background-color: var(--title-background-color); +} + +.image +{ + text-align: center; +} + +.dotgraph +{ + text-align: center; +} + +.mscgraph +{ + text-align: center; +} + +.plantumlgraph +{ + text-align: center; +} + +.diagraph +{ + text-align: center; +} + +.caption +{ + font-weight: bold; +} + +dl.citelist { + margin-bottom:50px; +} + +dl.citelist dt { + color:var(--citation-label-color); + float:left; + font-weight:bold; + margin-right:10px; + padding:5px; + text-align:right; + width:52px; +} + +dl.citelist dd { + margin:2px 0 2px 72px; + padding:5px 0; +} + +div.toc { + padding: 14px 25px; + background-color: var(--toc-background-color); + border: 1px solid var(--toc-border-color); + border-radius: 7px 7px 7px 7px; + float: right; + height: auto; + margin: 0 8px 10px 10px; + width: 200px; +} + +div.toc li { + background: var(--toc-down-arrow-image) no-repeat scroll 0 5px transparent; + font: 10px/1.2 var(--font-family-toc); + margin-top: 5px; + padding-left: 10px; + padding-top: 2px; +} + +div.toc h3 { + font: bold 12px/1.2 var(--font-family-toc); + color: var(--toc-header-color); + border-bottom: 0 none; + margin: 0; +} + +div.toc ul { + list-style: none outside none; + border: medium none; + padding: 0px; +} + +div.toc li.level1 { + margin-left: 0px; +} + +div.toc li.level2 { + margin-left: 15px; +} + +div.toc li.level3 { + margin-left: 15px; +} + +div.toc li.level4 { + margin-left: 15px; +} + +span.emoji { + /* font family used at the site: https://unicode.org/emoji/charts/full-emoji-list.html + * font-family: "Noto Color Emoji", "Apple Color Emoji", "Segoe UI Emoji", Times, Symbola, Aegyptus, Code2000, Code2001, Code2002, Musica, serif, LastResort; + */ +} + +span.obfuscator { + display: none; +} + +.inherit_header { + font-weight: bold; + color: var(--inherit-header-color); + cursor: pointer; + -webkit-touch-callout: none; + -webkit-user-select: none; + 
-khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +.inherit_header td { + padding: 6px 0px 2px 5px; +} + +.inherit { + display: none; +} + +tr.heading h2 { + margin-top: 12px; + margin-bottom: 4px; +} + +/* tooltip related style info */ + +.ttc { + position: absolute; + display: none; +} + +#powerTip { + cursor: default; + /*white-space: nowrap;*/ + color: var(--tooltip-foreground-color); + background-color: var(--tooltip-background-color); + border: 1px solid var(--tooltip-border-color); + border-radius: 4px 4px 4px 4px; + box-shadow: var(--tooltip-shadow); + display: none; + font-size: smaller; + max-width: 80%; + opacity: 0.9; + padding: 1ex 1em 1em; + position: absolute; + z-index: 2147483647; +} + +#powerTip div.ttdoc { + color: var(--tooltip-doc-color); + font-style: italic; +} + +#powerTip div.ttname a { + font-weight: bold; +} + +#powerTip a { + color: var(--tooltip-link-color); +} + +#powerTip div.ttname { + font-weight: bold; +} + +#powerTip div.ttdeci { + color: var(--tooltip-declaration-color); +} + +#powerTip div { + margin: 0px; + padding: 0px; + font-size: 12px; + font-family: var(--font-family-tooltip); + line-height: 16px; +} + +#powerTip:before, #powerTip:after { + content: ""; + position: absolute; + margin: 0px; +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.s:after, #powerTip.s:before, +#powerTip.w:after, #powerTip.w:before, +#powerTip.e:after, #powerTip.e:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.nw:after, #powerTip.nw:before, +#powerTip.sw:after, #powerTip.sw:before { + border: solid transparent; + content: " "; + height: 0; + width: 0; + position: absolute; +} + +#powerTip.n:after, #powerTip.s:after, +#powerTip.w:after, #powerTip.e:after, +#powerTip.nw:after, #powerTip.ne:after, +#powerTip.sw:after, #powerTip.se:after { + border-color: rgba(255, 255, 255, 0); +} + +#powerTip.n:before, #powerTip.s:before, +#powerTip.w:before, #powerTip.e:before, +#powerTip.nw:before, #powerTip.ne:before, +#powerTip.sw:before, #powerTip.se:before { + border-color: rgba(128, 128, 128, 0); +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.nw:after, #powerTip.nw:before { + top: 100%; +} + +#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { + border-top-color: var(--tooltip-background-color); + border-width: 10px; + margin: 0px -10px; +} +#powerTip.n:before, #powerTip.ne:before, #powerTip.nw:before { + border-top-color: var(--tooltip-border-color); + border-width: 11px; + margin: 0px -11px; +} +#powerTip.n:after, #powerTip.n:before { + left: 50%; +} + +#powerTip.nw:after, #powerTip.nw:before { + right: 14px; +} + +#powerTip.ne:after, #powerTip.ne:before { + left: 14px; +} + +#powerTip.s:after, #powerTip.s:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.sw:after, #powerTip.sw:before { + bottom: 100%; +} + +#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { + border-bottom-color: var(--tooltip-background-color); + border-width: 10px; + margin: 0px -10px; +} + +#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { + border-bottom-color: var(--tooltip-border-color); + border-width: 11px; + margin: 0px -11px; +} + +#powerTip.s:after, #powerTip.s:before { + left: 50%; +} + +#powerTip.sw:after, #powerTip.sw:before { + right: 14px; +} + +#powerTip.se:after, #powerTip.se:before { + left: 14px; +} + +#powerTip.e:after, #powerTip.e:before { + left: 100%; +} +#powerTip.e:after { + 
border-left-color: var(--tooltip-border-color); + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.e:before { + border-left-color: var(--tooltip-border-color); + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +#powerTip.w:after, #powerTip.w:before { + right: 100%; +} +#powerTip.w:after { + border-right-color: var(--tooltip-border-color); + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.w:before { + border-right-color: var(--tooltip-border-color); + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +@media print +{ + #top { display: none; } + #side-nav { display: none; } + #nav-path { display: none; } + body { overflow:visible; } + h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } + .summary { display: none; } + .memitem { page-break-inside: avoid; } + #doc-content + { + margin-left:0 !important; + height:auto !important; + width:auto !important; + overflow:inherit; + display:inline; + } +} + +/* @group Markdown */ + +table.markdownTable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.markdownTable td, table.markdownTable th { + border: 1px solid var(--table-cell-border-color); + padding: 3px 7px 2px; +} + +table.markdownTable tr { +} + +th.markdownTableHeadLeft, th.markdownTableHeadRight, th.markdownTableHeadCenter, th.markdownTableHeadNone { + background-color: var(--table-header-background-color); + color: var(--table-header-foreground-color); + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +th.markdownTableHeadLeft, td.markdownTableBodyLeft { + text-align: left +} + +th.markdownTableHeadRight, td.markdownTableBodyRight { + text-align: right +} + +th.markdownTableHeadCenter, td.markdownTableBodyCenter { + text-align: center +} + +tt, code, kbd, samp +{ + display: inline-block; +} +/* @end */ + +u { + text-decoration: underline; +} + +details>summary { + list-style-type: none; +} + +details > summary::-webkit-details-marker { + display: none; +} + +details>summary::before { + content: "\25ba"; + padding-right:4px; + font-size: 80%; +} + +details[open]>summary::before { + content: "\25bc"; + padding-right:4px; + font-size: 80%; +} + +body { + scrollbar-color: var(--scrollbar-thumb-color) var(--scrollbar-background-color); +} + +::-webkit-scrollbar { + background-color: var(--scrollbar-background-color); + height: 12px; + width: 12px; +} +::-webkit-scrollbar-thumb { + border-radius: 6px; + box-shadow: inset 0 0 12px 12px var(--scrollbar-thumb-color); + border: solid 2px transparent; +} +::-webkit-scrollbar-corner { + background-color: var(--scrollbar-background-color); +} + diff --git a/Jupyter/html/doxygen.svg b/Jupyter/html/doxygen.svg new file mode 100644 index 000000000..79a763540 --- /dev/null +++ b/Jupyter/html/doxygen.svg @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Jupyter/html/doxygen_crawl.html b/Jupyter/html/doxygen_crawl.html new file mode 100644 index 000000000..f875241e7 --- /dev/null +++ b/Jupyter/html/doxygen_crawl.html @@ -0,0 +1,20 @@ + + + +Validator / crawler helper + + + + + + + + + + + + + + + + diff --git a/Jupyter/html/dynsections.js b/Jupyter/html/dynsections.js new file mode 100644 index 000000000..b05f4c8d7 --- /dev/null +++ b/Jupyter/html/dynsections.js @@ -0,0 +1,198 @@ +/* + @licstart The following is the entire license notice for the JavaScript code in this file. 
+ + The MIT License (MIT) + + Copyright (C) 1997-2020 by Dimitri van Heesch + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software + and associated documentation files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or + substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + @licend The above is the entire license notice for the JavaScript code in this file + */ + +function toggleVisibility(linkObj) { + return dynsection.toggleVisibility(linkObj); +} + +let dynsection = { + + // helper function + updateStripes : function() { + $('table.directory tr'). + removeClass('even').filter(':visible:even').addClass('even'); + $('table.directory tr'). + removeClass('odd').filter(':visible:odd').addClass('odd'); + }, + + toggleVisibility : function(linkObj) { + const base = $(linkObj).attr('id'); + const summary = $('#'+base+'-summary'); + const content = $('#'+base+'-content'); + const trigger = $('#'+base+'-trigger'); + const src=$(trigger).attr('src'); + if (content.is(':visible')===true) { + content.hide(); + summary.show(); + $(linkObj).addClass('closed').removeClass('opened'); + $(trigger).attr('src',src.substring(0,src.length-8)+'closed.png'); + } else { + content.show(); + summary.hide(); + $(linkObj).removeClass('closed').addClass('opened'); + $(trigger).attr('src',src.substring(0,src.length-10)+'open.png'); + } + return false; + }, + + toggleLevel : function(level) { + $('table.directory tr').each(function() { + const l = this.id.split('_').length-1; + const i = $('#img'+this.id.substring(3)); + const a = $('#arr'+this.id.substring(3)); + if (l'); + // add vertical lines to other rows + $('span[class=lineno]').not(':eq(0)').append(''); + // add toggle controls to lines with fold divs + $('div[class=foldopen]').each(function() { + // extract specific id to use + const id = $(this).attr('id').replace('foldopen',''); + // extract start and end foldable fragment attributes + const start = $(this).attr('data-start'); + const end = $(this).attr('data-end'); + // replace normal fold span with controls for the first line of a foldable fragment + $(this).find('span[class=fold]:first').replaceWith(''); + // append div for folded (closed) representation + $(this).after(''); + // extract the first line from the "open" section to represent closed content + const line = $(this).children().first().clone(); + // remove any glow that might still be active on the original line + $(line).removeClass('glow'); + if (start) { + // if line already ends with a start marker (e.g. 
trailing {), remove it + $(line).html($(line).html().replace(new RegExp('\\s*'+start+'\\s*$','g'),'')); + } + // replace minus with plus symbol + $(line).find('span[class=fold]').css('background-image',codefold.plusImg[relPath]); + // append ellipsis + $(line).append(' '+start+''+end); + // insert constructed line into closed div + $('#foldclosed'+id).html(line); + }); + }, +}; +/* @license-end */ diff --git a/Jupyter/html/folderclosed.svg b/Jupyter/html/folderclosed.svg new file mode 100644 index 000000000..b04bed2e7 --- /dev/null +++ b/Jupyter/html/folderclosed.svg @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/Jupyter/html/folderclosedd.svg b/Jupyter/html/folderclosedd.svg new file mode 100644 index 000000000..52f0166a2 --- /dev/null +++ b/Jupyter/html/folderclosedd.svg @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/Jupyter/html/folderopen.svg b/Jupyter/html/folderopen.svg new file mode 100644 index 000000000..f6896dd25 --- /dev/null +++ b/Jupyter/html/folderopen.svg @@ -0,0 +1,17 @@ + + + + + + + + + + diff --git a/Jupyter/html/folderopend.svg b/Jupyter/html/folderopend.svg new file mode 100644 index 000000000..2d1f06e7b --- /dev/null +++ b/Jupyter/html/folderopend.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + diff --git a/Jupyter/html/images b/Jupyter/html/images new file mode 120000 index 000000000..5e6757319 --- /dev/null +++ b/Jupyter/html/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/Jupyter/html/index.html b/Jupyter/html/index.html new file mode 100644 index 000000000..ed39b1f0a --- /dev/null +++ b/Jupyter/html/index.html @@ -0,0 +1,111 @@ + + + + + + + + +IMP Tutorial: Main Page + + + + + + + + + + + + + + +
+ [Doxygen-generated index.html page shell; the markup was lost in extraction. Surviving text content: logo alt text "IMP logo", project name "IMP Tutorial", and page heading "IMP Tutorial Documentation".]
+ + + + + diff --git a/Jupyter/html/jquery.js b/Jupyter/html/jquery.js new file mode 100644 index 000000000..1dffb65b5 --- /dev/null +++ b/Jupyter/html/jquery.js @@ -0,0 +1,34 @@ +/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var 
n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return 
G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",options:{classes:{},disabled:!1,create:null},_createWidget:function(t,e){e=y(e||this.defaultElement||this)[0],this.element=y(e),this.uuid=i++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=y(),this.hoverable=y(),this.focusable=y(),this.classesElementLookup={},e!==this&&(y.data(e,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===e&&this.destroy()}}),this.document=y(e.style?e.ownerDocument:e.document||e),this.window=y(this.document[0].defaultView||this.document[0].parentWindow)),this.options=y.widget.extend({},this.options,this._getCreateOptions(),t),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:y.noop,_create:y.noop,_init:y.noop,destroy:function(){var i=this;this._destroy(),y.each(this.classesElementLookup,function(t,e){i._removeClass(e,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:y.noop,widget:function(){return this.element},option:function(t,e){var i,s,n,o=t;if(0===arguments.length)return y.widget.extend({},this.options);if("string"==typeof t)if(o={},t=(i=t.split(".")).shift(),i.length){for(s=o[t]=y.widget.extend({},this.options[t]),n=0;n
"),i=e.children()[0];return y("body").append(e),t=i.offsetWidth,e.css("overflow","scroll"),t===(i=i.offsetWidth)&&(i=e[0].clientWidth),e.remove(),s=t-i},getScrollInfo:function(t){var e=t.isWindow||t.isDocument?"":t.element.css("overflow-x"),i=t.isWindow||t.isDocument?"":t.element.css("overflow-y"),e="scroll"===e||"auto"===e&&t.widthx(D(s),D(n))?o.important="horizontal":o.important="vertical",p.using.call(this,t,o)}),h.offset(y.extend(l,{using:t}))})},y.ui.position={fit:{left:function(t,e){var i=e.within,s=i.isWindow?i.scrollLeft:i.offset.left,n=i.width,o=t.left-e.collisionPosition.marginLeft,h=s-o,a=o+e.collisionWidth-n-s;e.collisionWidth>n?0n?0=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),y.ui.plugin={add:function(t,e,i){var s,n=y.ui[t].prototype;for(s in i)n.plugins[s]=n.plugins[s]||[],n.plugins[s].push([e,i[s]])},call:function(t,e,i,s){var n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;n").css({overflow:"hidden",position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,t={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(t),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(t),this._proportionallyResize()),this._setupHandles(),e.autoHide&&y(this.element).on("mouseenter",function(){e.disabled||(i._removeClass("ui-resizable-autohide"),i._handles.show())}).on("mouseleave",function(){e.disabled||i.resizing||(i._addClass("ui-resizable-autohide"),i._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy(),this._addedHandles.remove();function t(t){y(t).removeData("resizable").removeData("ui-resizable").off(".resizable")}var e;return this.elementIsWrapper&&(t(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),t(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;case"aspectRatio":this._aspectRatio=!!e}},_setupHandles:function(){var t,e,i,s,n,o=this.options,h=this;if(this.handles=o.handles||(y(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=y(),this._addedHandles=y(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),i=this.handles.split(","),this.handles={},e=0;e"),this._addClass(n,"ui-resizable-handle 
"+s),n.css({zIndex:o.zIndex}),this.handles[t]=".ui-resizable-"+t,this.element.children(this.handles[t]).length||(this.element.append(n),this._addedHandles=this._addedHandles.add(n));this._renderAxis=function(t){var e,i,s;for(e in t=t||this.element,this.handles)this.handles[e].constructor===String?this.handles[e]=this.element.children(this.handles[e]).first().show():(this.handles[e].jquery||this.handles[e].nodeType)&&(this.handles[e]=y(this.handles[e]),this._on(this.handles[e],{mousedown:h._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(i=y(this.handles[e],this.element),s=/sw|ne|nw|se|n|s/.test(e)?i.outerHeight():i.outerWidth(),i=["padding",/ne|nw|n/.test(e)?"Top":/se|sw|s/.test(e)?"Bottom":/^e$/.test(e)?"Right":"Left"].join(""),t.css(i,s),this._proportionallyResize()),this._handles=this._handles.add(this.handles[e])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){h.resizing||(this.className&&(n=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),h.axis=n&&n[1]?n[1]:"se")}),o.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._addedHandles.remove()},_mouseCapture:function(t){var e,i,s=!1;for(e in this.handles)(i=y(this.handles[e])[0])!==t.target&&!y.contains(i,t.target)||(s=!0);return!this.options.disabled&&s},_mouseStart:function(t){var e,i,s=this.options,n=this.element;return this.resizing=!0,this._renderProxy(),e=this._num(this.helper.css("left")),i=this._num(this.helper.css("top")),s.containment&&(e+=y(s.containment).scrollLeft()||0,i+=y(s.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:e,top:i},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:n.width(),height:n.height()},this.originalSize=this._helper?{width:n.outerWidth(),height:n.outerHeight()}:{width:n.width(),height:n.height()},this.sizeDiff={width:n.outerWidth()-n.width(),height:n.outerHeight()-n.height()},this.originalPosition={left:e,top:i},this.originalMousePosition={left:t.pageX,top:t.pageY},this.aspectRatio="number"==typeof s.aspectRatio?s.aspectRatio:this.originalSize.width/this.originalSize.height||1,s=y(".ui-resizable-"+this.axis).css("cursor"),y("body").css("cursor","auto"===s?this.axis+"-resize":s),this._addClass("ui-resizable-resizing"),this._propagate("start",t),!0},_mouseDrag:function(t){var e=this.originalMousePosition,i=this.axis,s=t.pageX-e.left||0,e=t.pageY-e.top||0,i=this._change[i];return this._updatePrevProperties(),i&&(e=i.apply(this,[t,s,e]),this._updateVirtualBoundaries(t.shiftKey),(this._aspectRatio||t.shiftKey)&&(e=this._updateRatio(e,t)),e=this._respectSize(e,t),this._updateCache(e),this._propagate("resize",t),e=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),y.isEmptyObject(e)||(this._updatePrevProperties(),this._trigger("resize",t,this.ui()),this._applyChanges())),!1},_mouseStop:function(t){this.resizing=!1;var e,i,s,n=this.options,o=this;return 
this._helper&&(s=(e=(i=this._proportionallyResizeElements).length&&/textarea/i.test(i[0].nodeName))&&this._hasScroll(i[0],"left")?0:o.sizeDiff.height,i=e?0:o.sizeDiff.width,e={width:o.helper.width()-i,height:o.helper.height()-s},i=parseFloat(o.element.css("left"))+(o.position.left-o.originalPosition.left)||null,s=parseFloat(o.element.css("top"))+(o.position.top-o.originalPosition.top)||null,n.animate||this.element.css(y.extend(e,{top:s,left:i})),o.helper.height(o.size.height),o.helper.width(o.size.width),this._helper&&!n.animate&&this._proportionallyResize()),y("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",t),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},_updateVirtualBoundaries:function(t){var e,i,s=this.options,n={minWidth:this._isNumber(s.minWidth)?s.minWidth:0,maxWidth:this._isNumber(s.maxWidth)?s.maxWidth:1/0,minHeight:this._isNumber(s.minHeight)?s.minHeight:0,maxHeight:this._isNumber(s.maxHeight)?s.maxHeight:1/0};(this._aspectRatio||t)&&(e=n.minHeight*this.aspectRatio,i=n.minWidth/this.aspectRatio,s=n.maxHeight*this.aspectRatio,t=n.maxWidth/this.aspectRatio,e>n.minWidth&&(n.minWidth=e),i>n.minHeight&&(n.minHeight=i),st.width,h=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,a=this.originalPosition.left+this.originalSize.width,r=this.originalPosition.top+this.originalSize.height,l=/sw|nw|w/.test(i),i=/nw|ne|n/.test(i);return o&&(t.width=e.minWidth),h&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=a-e.minWidth),s&&l&&(t.left=a-e.maxWidth),h&&i&&(t.top=r-e.minHeight),n&&i&&(t.top=r-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];e<4;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var t,e=0,i=this.helper||this.element;e").css({overflow:"hidden"}),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++e.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize;return{left:this.originalPosition.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize;return{top:this.originalPosition.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(t,e,i){return y.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[t,e,i]))},sw:function(t,e,i){return 
y.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[t,e,i]))},ne:function(t,e,i){return y.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[t,e,i]))},nw:function(t,e,i){return y.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[t,e,i]))}},_propagate:function(t,e){y.ui.plugin.call(this,t,[e,this.ui()]),"resize"!==t&&this._trigger(t,e,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),y.ui.plugin.add("resizable","animate",{stop:function(e){var i=y(this).resizable("instance"),t=i.options,s=i._proportionallyResizeElements,n=s.length&&/textarea/i.test(s[0].nodeName),o=n&&i._hasScroll(s[0],"left")?0:i.sizeDiff.height,h=n?0:i.sizeDiff.width,n={width:i.size.width-h,height:i.size.height-o},h=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left)||null,o=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(y.extend(n,o&&h?{top:o,left:h}:{}),{duration:t.animateDuration,easing:t.animateEasing,step:function(){var t={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};s&&s.length&&y(s[0]).css({width:t.width,height:t.height}),i._updateCache(t),i._propagate("resize",e)}})}}),y.ui.plugin.add("resizable","containment",{start:function(){var i,s,n=y(this).resizable("instance"),t=n.options,e=n.element,o=t.containment,h=o instanceof y?o.get(0):/parent/.test(o)?e.parent().get(0):o;h&&(n.containerElement=y(h),/document/.test(o)||o===document?(n.containerOffset={left:0,top:0},n.containerPosition={left:0,top:0},n.parentData={element:y(document),left:0,top:0,width:y(document).width(),height:y(document).height()||document.body.parentNode.scrollHeight}):(i=y(h),s=[],y(["Top","Right","Left","Bottom"]).each(function(t,e){s[t]=n._num(i.css("padding"+e))}),n.containerOffset=i.offset(),n.containerPosition=i.position(),n.containerSize={height:i.innerHeight()-s[3],width:i.innerWidth()-s[1]},t=n.containerOffset,e=n.containerSize.height,o=n.containerSize.width,o=n._hasScroll(h,"left")?h.scrollWidth:o,e=n._hasScroll(h)?h.scrollHeight:e,n.parentData={element:h,left:t.left,top:t.top,width:o,height:e}))},resize:function(t){var 
e=y(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.position,o=e._aspectRatio||t.shiftKey,h={top:0,left:0},a=e.containerElement,t=!0;a[0]!==document&&/static/.test(a.css("position"))&&(h=s),n.left<(e._helper?s.left:0)&&(e.size.width=e.size.width+(e._helper?e.position.left-s.left:e.position.left-h.left),o&&(e.size.height=e.size.width/e.aspectRatio,t=!1),e.position.left=i.helper?s.left:0),n.top<(e._helper?s.top:0)&&(e.size.height=e.size.height+(e._helper?e.position.top-s.top:e.position.top),o&&(e.size.width=e.size.height*e.aspectRatio,t=!1),e.position.top=e._helper?s.top:0),i=e.containerElement.get(0)===e.element.parent().get(0),n=/relative|absolute/.test(e.containerElement.css("position")),i&&n?(e.offset.left=e.parentData.left+e.position.left,e.offset.top=e.parentData.top+e.position.top):(e.offset.left=e.element.offset().left,e.offset.top=e.element.offset().top),n=Math.abs(e.sizeDiff.width+(e._helper?e.offset.left-h.left:e.offset.left-s.left)),s=Math.abs(e.sizeDiff.height+(e._helper?e.offset.top-h.top:e.offset.top-s.top)),n+e.size.width>=e.parentData.width&&(e.size.width=e.parentData.width-n,o&&(e.size.height=e.size.width/e.aspectRatio,t=!1)),s+e.size.height>=e.parentData.height&&(e.size.height=e.parentData.height-s,o&&(e.size.width=e.size.height*e.aspectRatio,t=!1)),t||(e.position.left=e.prevPosition.left,e.position.top=e.prevPosition.top,e.size.width=e.prevSize.width,e.size.height=e.prevSize.height)},stop:function(){var t=y(this).resizable("instance"),e=t.options,i=t.containerOffset,s=t.containerPosition,n=t.containerElement,o=y(t.helper),h=o.offset(),a=o.outerWidth()-t.sizeDiff.width,o=o.outerHeight()-t.sizeDiff.height;t._helper&&!e.animate&&/relative/.test(n.css("position"))&&y(this).css({left:h.left-s.left-i.left,width:a,height:o}),t._helper&&!e.animate&&/static/.test(n.css("position"))&&y(this).css({left:h.left-s.left-i.left,width:a,height:o})}}),y.ui.plugin.add("resizable","alsoResize",{start:function(){var t=y(this).resizable("instance").options;y(t.alsoResize).each(function(){var t=y(this);t.data("ui-resizable-alsoresize",{width:parseFloat(t.width()),height:parseFloat(t.height()),left:parseFloat(t.css("left")),top:parseFloat(t.css("top"))})})},resize:function(t,i){var e=y(this).resizable("instance"),s=e.options,n=e.originalSize,o=e.originalPosition,h={height:e.size.height-n.height||0,width:e.size.width-n.width||0,top:e.position.top-o.top||0,left:e.position.left-o.left||0};y(s.alsoResize).each(function(){var t=y(this),s=y(this).data("ui-resizable-alsoresize"),n={},e=t.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];y.each(e,function(t,e){var i=(s[e]||0)+(h[e]||0);i&&0<=i&&(n[e]=i||null)}),t.css(n)})},stop:function(){y(this).removeData("ui-resizable-alsoresize")}}),y.ui.plugin.add("resizable","ghost",{start:function(){var t=y(this).resizable("instance"),e=t.size;t.ghost=t.originalElement.clone(),t.ghost.css({opacity:.25,display:"block",position:"relative",height:e.height,width:e.width,margin:0,left:0,top:0}),t._addClass(t.ghost,"ui-resizable-ghost"),!1!==y.uiBackCompat&&"string"==typeof t.options.ghost&&t.ghost.addClass(this.options.ghost),t.ghost.appendTo(t.helper)},resize:function(){var t=y(this).resizable("instance");t.ghost&&t.ghost.css({position:"relative",height:t.size.height,width:t.size.width})},stop:function(){var t=y(this).resizable("instance");t.ghost&&t.helper&&t.helper.get(0).removeChild(t.ghost.get(0))}}),y.ui.plugin.add("resizable","grid",{resize:function(){var 
t,e=y(this).resizable("instance"),i=e.options,s=e.size,n=e.originalSize,o=e.originalPosition,h=e.axis,a="number"==typeof i.grid?[i.grid,i.grid]:i.grid,r=a[0]||1,l=a[1]||1,u=Math.round((s.width-n.width)/r)*r,p=Math.round((s.height-n.height)/l)*l,d=n.width+u,c=n.height+p,f=i.maxWidth&&i.maxWidthd,s=i.minHeight&&i.minHeight>c;i.grid=a,m&&(d+=r),s&&(c+=l),f&&(d-=r),g&&(c-=l),/^(se|s|e)$/.test(h)?(e.size.width=d,e.size.height=c):/^(ne)$/.test(h)?(e.size.width=d,e.size.height=c,e.position.top=o.top-p):/^(sw)$/.test(h)?(e.size.width=d,e.size.height=c,e.position.left=o.left-u):((c-l<=0||d-r<=0)&&(t=e._getPaddingPlusBorderDimensions(this)),0=f[g]?0:Math.min(f[g],n));!a&&1-1){targetElements.on(evt+EVENT_NAMESPACE,function elementToggle(event){$.powerTip.toggle(this,event)})}else{targetElements.on(evt+EVENT_NAMESPACE,function elementOpen(event){$.powerTip.show(this,event)})}});$.each(options.closeEvents,function(idx,evt){if($.inArray(evt,options.openEvents)<0){targetElements.on(evt+EVENT_NAMESPACE,function elementClose(event){$.powerTip.hide(this,!isMouseEvent(event))})}});targetElements.on("keydown"+EVENT_NAMESPACE,function elementKeyDown(event){if(event.keyCode===27){$.powerTip.hide(this,true)}})}return targetElements};$.fn.powerTip.defaults={fadeInTime:200,fadeOutTime:100,followMouse:false,popupId:"powerTip",popupClass:null,intentSensitivity:7,intentPollInterval:100,closeDelay:100,placement:"n",smartPlacement:false,offset:10,mouseOnToPopup:false,manual:false,openEvents:["mouseenter","focus"],closeEvents:["mouseleave","blur"]};$.fn.powerTip.smartPlacementLists={n:["n","ne","nw","s"],e:["e","ne","se","w","nw","sw","n","s","e"],s:["s","se","sw","n"],w:["w","nw","sw","e","ne","se","n","s","w"],nw:["nw","w","sw","n","s","se","nw"],ne:["ne","e","se","n","s","sw","ne"],sw:["sw","w","nw","s","n","ne","sw"],se:["se","e","ne","s","n","nw","se"],"nw-alt":["nw-alt","n","ne-alt","sw-alt","s","se-alt","w","e"],"ne-alt":["ne-alt","n","nw-alt","se-alt","s","sw-alt","e","w"],"sw-alt":["sw-alt","s","se-alt","nw-alt","n","ne-alt","w","e"],"se-alt":["se-alt","s","sw-alt","ne-alt","n","nw-alt","e","w"]};$.powerTip={show:function apiShowTip(element,event){if(isMouseEvent(event)){trackMouse(event);session.previousX=event.pageX;session.previousY=event.pageY;$(element).data(DATA_DISPLAYCONTROLLER).show()}else{$(element).first().data(DATA_DISPLAYCONTROLLER).show(true,true)}return element},reposition:function apiResetPosition(element){$(element).first().data(DATA_DISPLAYCONTROLLER).resetPosition();return element},hide:function apiCloseTip(element,immediate){var displayController;immediate=element?immediate:true;if(element){displayController=$(element).first().data(DATA_DISPLAYCONTROLLER)}else if(session.activeHover){displayController=session.activeHover.data(DATA_DISPLAYCONTROLLER)}if(displayController){displayController.hide(immediate)}return element},toggle:function apiToggle(element,event){if(session.activeHover&&session.activeHover.is(element)){$.powerTip.hide(element,!isMouseEvent(event))}else{$.powerTip.show(element,event)}return element}};$.powerTip.showTip=$.powerTip.show;$.powerTip.closeTip=$.powerTip.hide;function CSSCoordinates(){var me=this;me.top="auto";me.left="auto";me.right="auto";me.bottom="auto";me.set=function(property,value){if($.isNumeric(value)){me[property]=Math.round(value)}}}function DisplayController(element,options,tipController){var hoverTimer=null,myCloseDelay=null;function 
openTooltip(immediate,forceOpen){cancelTimer();if(!element.data(DATA_HASACTIVEHOVER)){if(!immediate){session.tipOpenImminent=true;hoverTimer=setTimeout(function intentDelay(){hoverTimer=null;checkForIntent()},options.intentPollInterval)}else{if(forceOpen){element.data(DATA_FORCEDOPEN,true)}closeAnyDelayed();tipController.showTip(element)}}else{cancelClose()}}function closeTooltip(disableDelay){if(myCloseDelay){myCloseDelay=session.closeDelayTimeout=clearTimeout(myCloseDelay);session.delayInProgress=false}cancelTimer();session.tipOpenImminent=false;if(element.data(DATA_HASACTIVEHOVER)){element.data(DATA_FORCEDOPEN,false);if(!disableDelay){session.delayInProgress=true;session.closeDelayTimeout=setTimeout(function closeDelay(){session.closeDelayTimeout=null;tipController.hideTip(element);session.delayInProgress=false;myCloseDelay=null},options.closeDelay);myCloseDelay=session.closeDelayTimeout}else{tipController.hideTip(element)}}}function checkForIntent(){var xDifference=Math.abs(session.previousX-session.currentX),yDifference=Math.abs(session.previousY-session.currentY),totalDifference=xDifference+yDifference;if(totalDifference",{id:options.popupId});if($body.length===0){$body=$("body")}$body.append(tipElement);session.tooltips=session.tooltips?session.tooltips.add(tipElement):tipElement}if(options.followMouse){if(!tipElement.data(DATA_HASMOUSEMOVE)){$document.on("mousemove"+EVENT_NAMESPACE,positionTipOnCursor);$window.on("scroll"+EVENT_NAMESPACE,positionTipOnCursor);tipElement.data(DATA_HASMOUSEMOVE,true)}}function beginShowTip(element){element.data(DATA_HASACTIVEHOVER,true);tipElement.queue(function queueTipInit(next){showTip(element);next()})}function showTip(element){var tipContent;if(!element.data(DATA_HASACTIVEHOVER)){return}if(session.isTipOpen){if(!session.isClosing){hideTip(session.activeHover)}tipElement.delay(100).queue(function queueTipAgain(next){showTip(element);next()});return}element.trigger("powerTipPreRender");tipContent=getTooltipContent(element);if(tipContent){tipElement.empty().append(tipContent)}else{return}element.trigger("powerTipRender");session.activeHover=element;session.isTipOpen=true;tipElement.data(DATA_MOUSEONTOTIP,options.mouseOnToPopup);tipElement.addClass(options.popupClass);if(!options.followMouse||element.data(DATA_FORCEDOPEN)){positionTipOnElement(element);session.isFixedTipOpen=true}else{positionTipOnCursor()}if(!element.data(DATA_FORCEDOPEN)&&!options.followMouse){$document.on("click"+EVENT_NAMESPACE,function documentClick(event){var target=event.target;if(target!==element[0]){if(options.mouseOnToPopup){if(target!==tipElement[0]&&!$.contains(tipElement[0],target)){$.powerTip.hide()}}else{$.powerTip.hide()}}})}if(options.mouseOnToPopup&&!options.manual){tipElement.on("mouseenter"+EVENT_NAMESPACE,function tipMouseEnter(){if(session.activeHover){session.activeHover.data(DATA_DISPLAYCONTROLLER).cancel()}});tipElement.on("mouseleave"+EVENT_NAMESPACE,function tipMouseLeave(){if(session.activeHover){session.activeHover.data(DATA_DISPLAYCONTROLLER).hide()}})}tipElement.fadeIn(options.fadeInTime,function fadeInCallback(){if(!session.desyncTimeout){session.desyncTimeout=setInterval(closeDesyncedTip,500)}element.trigger("powerTipOpen")})}function hideTip(element){session.isClosing=true;session.isTipOpen=false;session.desyncTimeout=clearInterval(session.desyncTimeout);element.data(DATA_HASACTIVEHOVER,false);element.data(DATA_FORCEDOPEN,false);$document.off("click"+EVENT_NAMESPACE);tipElement.off(EVENT_NAMESPACE);tipElement.fadeOut(options.fadeOutTime,function 
fadeOutCallback(){var coords=new CSSCoordinates;session.activeHover=null;session.isClosing=false;session.isFixedTipOpen=false;tipElement.removeClass();coords.set("top",session.currentY+options.offset);coords.set("left",session.currentX+options.offset);tipElement.css(coords);element.trigger("powerTipClose")})}function positionTipOnCursor(){var tipWidth,tipHeight,coords,collisions,collisionCount;if(!session.isFixedTipOpen&&(session.isTipOpen||session.tipOpenImminent&&tipElement.data(DATA_HASMOUSEMOVE))){tipWidth=tipElement.outerWidth();tipHeight=tipElement.outerHeight();coords=new CSSCoordinates;coords.set("top",session.currentY+options.offset);coords.set("left",session.currentX+options.offset);collisions=getViewportCollisions(coords,tipWidth,tipHeight);if(collisions!==Collision.none){collisionCount=countFlags(collisions);if(collisionCount===1){if(collisions===Collision.right){coords.set("left",session.scrollLeft+session.windowWidth-tipWidth)}else if(collisions===Collision.bottom){coords.set("top",session.scrollTop+session.windowHeight-tipHeight)}}else{coords.set("left",session.currentX-tipWidth-options.offset);coords.set("top",session.currentY-tipHeight-options.offset)}}tipElement.css(coords)}}function positionTipOnElement(element){var priorityList,finalPlacement;if(options.smartPlacement||options.followMouse&&element.data(DATA_FORCEDOPEN)){priorityList=$.fn.powerTip.smartPlacementLists[options.placement];$.each(priorityList,function(idx,pos){var collisions=getViewportCollisions(placeTooltip(element,pos),tipElement.outerWidth(),tipElement.outerHeight());finalPlacement=pos;return collisions!==Collision.none})}else{placeTooltip(element,options.placement);finalPlacement=options.placement}tipElement.removeClass("w nw sw e ne se n s w se-alt sw-alt ne-alt nw-alt");tipElement.addClass(finalPlacement)}function placeTooltip(element,placement){var iterationCount=0,tipWidth,tipHeight,coords=new CSSCoordinates;coords.set("top",0);coords.set("left",0);tipElement.css(coords);do{tipWidth=tipElement.outerWidth();tipHeight=tipElement.outerHeight();coords=placementCalculator.compute(element,placement,tipWidth,tipHeight,options.offset);tipElement.css(coords)}while(++iterationCount<=5&&(tipWidth!==tipElement.outerWidth()||tipHeight!==tipElement.outerHeight()));return coords}function closeDesyncedTip(){var isDesynced=false,hasDesyncableCloseEvent=$.grep(["mouseleave","mouseout","blur","focusout"],function(eventType){return $.inArray(eventType,options.closeEvents)!==-1}).length>0;if(session.isTipOpen&&!session.isClosing&&!session.delayInProgress&&hasDesyncableCloseEvent){if(session.activeHover.data(DATA_HASACTIVEHOVER)===false||session.activeHover.is(":disabled")){isDesynced=true}else if(!isMouseOver(session.activeHover)&&!session.activeHover.is(":focus")&&!session.activeHover.data(DATA_FORCEDOPEN)){if(tipElement.data(DATA_MOUSEONTOTIP)){if(!isMouseOver(tipElement)){isDesynced=true}}else{isDesynced=true}}if(isDesynced){hideTip(session.activeHover)}}}this.showTip=beginShowTip;this.hideTip=hideTip;this.resetPosition=positionTipOnElement}function isSvgElement(element){return Boolean(window.SVGElement&&element[0]instanceof SVGElement)}function isMouseEvent(event){return Boolean(event&&$.inArray(event.type,MOUSE_EVENTS)>-1&&typeof event.pageX==="number")}function 
initTracking(){if(!session.mouseTrackingActive){session.mouseTrackingActive=true;getViewportDimensions();$(getViewportDimensions);$document.on("mousemove"+EVENT_NAMESPACE,trackMouse);$window.on("resize"+EVENT_NAMESPACE,trackResize);$window.on("scroll"+EVENT_NAMESPACE,trackScroll)}}function getViewportDimensions(){session.scrollLeft=$window.scrollLeft();session.scrollTop=$window.scrollTop();session.windowWidth=$window.width();session.windowHeight=$window.height()}function trackResize(){session.windowWidth=$window.width();session.windowHeight=$window.height()}function trackScroll(){var x=$window.scrollLeft(),y=$window.scrollTop();if(x!==session.scrollLeft){session.currentX+=x-session.scrollLeft;session.scrollLeft=x}if(y!==session.scrollTop){session.currentY+=y-session.scrollTop;session.scrollTop=y}}function trackMouse(event){session.currentX=event.pageX;session.currentY=event.pageY}function isMouseOver(element){var elementPosition=element.offset(),elementBox=element[0].getBoundingClientRect(),elementWidth=elementBox.right-elementBox.left,elementHeight=elementBox.bottom-elementBox.top;return session.currentX>=elementPosition.left&&session.currentX<=elementPosition.left+elementWidth&&session.currentY>=elementPosition.top&&session.currentY<=elementPosition.top+elementHeight}function getTooltipContent(element){var tipText=element.data(DATA_POWERTIP),tipObject=element.data(DATA_POWERTIPJQ),tipTarget=element.data(DATA_POWERTIPTARGET),targetElement,content;if(tipText){if($.isFunction(tipText)){tipText=tipText.call(element[0])}content=tipText}else if(tipObject){if($.isFunction(tipObject)){tipObject=tipObject.call(element[0])}if(tipObject.length>0){content=tipObject.clone(true,true)}}else if(tipTarget){targetElement=$("#"+tipTarget);if(targetElement.length>0){content=targetElement.html()}}return content}function getViewportCollisions(coords,elementWidth,elementHeight){var viewportTop=session.scrollTop,viewportLeft=session.scrollLeft,viewportBottom=viewportTop+session.windowHeight,viewportRight=viewportLeft+session.windowWidth,collisions=Collision.none;if(coords.topviewportBottom||Math.abs(coords.bottom-session.windowHeight)>viewportBottom){collisions|=Collision.bottom}if(coords.leftviewportRight){collisions|=Collision.left}if(coords.left+elementWidth>viewportRight||coords.right1)){a.preventDefault();var c=a.originalEvent.changedTouches[0],d=document.createEvent("MouseEvents");d.initMouseEvent(b,!0,!0,window,1,c.screenX,c.screenY,c.clientX,c.clientY,!1,!1,!1,!1,0,null),a.target.dispatchEvent(d)}}if(a.support.touch="ontouchend"in document,a.support.touch){var e,b=a.ui.mouse.prototype,c=b._mouseInit,d=b._mouseDestroy;b._touchStart=function(a){var b=this;!e&&b._mouseCapture(a.originalEvent.changedTouches[0])&&(e=!0,b._touchMoved=!1,f(a,"mouseover"),f(a,"mousemove"),f(a,"mousedown"))},b._touchMove=function(a){e&&(this._touchMoved=!0,f(a,"mousemove"))},b._touchEnd=function(a){e&&(f(a,"mouseup"),f(a,"mouseout"),this._touchMoved||f(a,"click"),e=!1)},b._mouseInit=function(){var b=this;b.element.bind({touchstart:a.proxy(b,"_touchStart"),touchmove:a.proxy(b,"_touchMove"),touchend:a.proxy(b,"_touchEnd")}),c.call(b)},b._mouseDestroy=function(){var b=this;b.element.unbind({touchstart:a.proxy(b,"_touchStart"),touchmove:a.proxy(b,"_touchMove"),touchend:a.proxy(b,"_touchEnd")}),d.call(b)}}}(jQuery);/*! SmartMenus jQuery Plugin - v1.1.0 - September 17, 2017 + * http://www.smartmenus.org/ + * Copyright Vasil Dinkov, Vadikom Web Ltd. 
http://vadikom.com; Licensed MIT */(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):"object"==typeof module&&"object"==typeof module.exports?module.exports=t(require("jquery")):t(jQuery)})(function($){function initMouseDetection(t){var e=".smartmenus_mouse";if(mouseDetectionEnabled||t)mouseDetectionEnabled&&t&&($(document).off(e),mouseDetectionEnabled=!1);else{var i=!0,s=null,o={mousemove:function(t){var e={x:t.pageX,y:t.pageY,timeStamp:(new Date).getTime()};if(s){var o=Math.abs(s.x-e.x),a=Math.abs(s.y-e.y);if((o>0||a>0)&&2>=o&&2>=a&&300>=e.timeStamp-s.timeStamp&&(mouse=!0,i)){var n=$(t.target).closest("a");n.is("a")&&$.each(menuTrees,function(){return $.contains(this.$root[0],n[0])?(this.itemEnter({currentTarget:n[0]}),!1):void 0}),i=!1}}s=e}};o[touchEvents?"touchstart":"pointerover pointermove pointerout MSPointerOver MSPointerMove MSPointerOut"]=function(t){isTouchEvent(t.originalEvent)&&(mouse=!1)},$(document).on(getEventsNS(o,e)),mouseDetectionEnabled=!0}}function isTouchEvent(t){return!/^(4|mouse)$/.test(t.pointerType)}function getEventsNS(t,e){e||(e="");var i={};for(var s in t)i[s.split(" ").join(e+" ")+e]=t[s];return i}var menuTrees=[],mouse=!1,touchEvents="ontouchstart"in window,mouseDetectionEnabled=!1,requestAnimationFrame=window.requestAnimationFrame||function(t){return setTimeout(t,1e3/60)},cancelAnimationFrame=window.cancelAnimationFrame||function(t){clearTimeout(t)},canAnimate=!!$.fn.animate;return $.SmartMenus=function(t,e){this.$root=$(t),this.opts=e,this.rootId="",this.accessIdPrefix="",this.$subArrow=null,this.activatedItems=[],this.visibleSubMenus=[],this.showTimeout=0,this.hideTimeout=0,this.scrollTimeout=0,this.clickActivated=!1,this.focusActivated=!1,this.zIndexInc=0,this.idInc=0,this.$firstLink=null,this.$firstSub=null,this.disabled=!1,this.$disableOverlay=null,this.$touchScrollingSub=null,this.cssTransforms3d="perspective"in t.style||"webkitPerspective"in t.style,this.wasCollapsible=!1,this.init()},$.extend($.SmartMenus,{hideAll:function(){$.each(menuTrees,function(){this.menuHideAll()})},destroy:function(){for(;menuTrees.length;)menuTrees[0].destroy();initMouseDetection(!0)},prototype:{init:function(t){var e=this;if(!t){menuTrees.push(this),this.rootId=((new Date).getTime()+Math.random()+"").replace(/\D/g,""),this.accessIdPrefix="sm-"+this.rootId+"-",this.$root.hasClass("sm-rtl")&&(this.opts.rightToLeftSubMenus=!0);var i=".smartmenus";this.$root.data("smartmenus",this).attr("data-smartmenus-id",this.rootId).dataSM("level",1).on(getEventsNS({"mouseover focusin":$.proxy(this.rootOver,this),"mouseout focusout":$.proxy(this.rootOut,this),keydown:$.proxy(this.rootKeyDown,this)},i)).on(getEventsNS({mouseenter:$.proxy(this.itemEnter,this),mouseleave:$.proxy(this.itemLeave,this),mousedown:$.proxy(this.itemDown,this),focus:$.proxy(this.itemFocus,this),blur:$.proxy(this.itemBlur,this),click:$.proxy(this.itemClick,this)},i),"a"),i+=this.rootId,this.opts.hideOnClick&&$(document).on(getEventsNS({touchstart:$.proxy(this.docTouchStart,this),touchmove:$.proxy(this.docTouchMove,this),touchend:$.proxy(this.docTouchEnd,this),click:$.proxy(this.docClick,this)},i)),$(window).on(getEventsNS({"resize 
orientationchange":$.proxy(this.winResize,this)},i)),this.opts.subIndicators&&(this.$subArrow=$("").addClass("sub-arrow"),this.opts.subIndicatorsText&&this.$subArrow.html(this.opts.subIndicatorsText)),initMouseDetection()}if(this.$firstSub=this.$root.find("ul").each(function(){e.menuInit($(this))}).eq(0),this.$firstLink=this.$root.find("a").eq(0),this.opts.markCurrentItem){var s=/(index|default)\.[^#\?\/]*/i,o=/#.*/,a=window.location.href.replace(s,""),n=a.replace(o,"");this.$root.find("a").each(function(){var t=this.href.replace(s,""),i=$(this);(t==a||t==n)&&(i.addClass("current"),e.opts.markCurrentTree&&i.parentsUntil("[data-smartmenus-id]","ul").each(function(){$(this).dataSM("parent-a").addClass("current")}))})}this.wasCollapsible=this.isCollapsible()},destroy:function(t){if(!t){var e=".smartmenus";this.$root.removeData("smartmenus").removeAttr("data-smartmenus-id").removeDataSM("level").off(e),e+=this.rootId,$(document).off(e),$(window).off(e),this.opts.subIndicators&&(this.$subArrow=null)}this.menuHideAll();var i=this;this.$root.find("ul").each(function(){var t=$(this);t.dataSM("scroll-arrows")&&t.dataSM("scroll-arrows").remove(),t.dataSM("shown-before")&&((i.opts.subMenusMinWidth||i.opts.subMenusMaxWidth)&&t.css({width:"",minWidth:"",maxWidth:""}).removeClass("sm-nowrap"),t.dataSM("scroll-arrows")&&t.dataSM("scroll-arrows").remove(),t.css({zIndex:"",top:"",left:"",marginLeft:"",marginTop:"",display:""})),0==(t.attr("id")||"").indexOf(i.accessIdPrefix)&&t.removeAttr("id")}).removeDataSM("in-mega").removeDataSM("shown-before").removeDataSM("scroll-arrows").removeDataSM("parent-a").removeDataSM("level").removeDataSM("beforefirstshowfired").removeAttr("role").removeAttr("aria-hidden").removeAttr("aria-labelledby").removeAttr("aria-expanded"),this.$root.find("a.has-submenu").each(function(){var t=$(this);0==t.attr("id").indexOf(i.accessIdPrefix)&&t.removeAttr("id")}).removeClass("has-submenu").removeDataSM("sub").removeAttr("aria-haspopup").removeAttr("aria-controls").removeAttr("aria-expanded").closest("li").removeDataSM("sub"),this.opts.subIndicators&&this.$root.find("span.sub-arrow").remove(),this.opts.markCurrentItem&&this.$root.find("a.current").removeClass("current"),t||(this.$root=null,this.$firstLink=null,this.$firstSub=null,this.$disableOverlay&&(this.$disableOverlay.remove(),this.$disableOverlay=null),menuTrees.splice($.inArray(this,menuTrees),1))},disable:function(t){if(!this.disabled){if(this.menuHideAll(),!t&&!this.opts.isPopup&&this.$root.is(":visible")){var e=this.$root.offset();this.$disableOverlay=$('
').css({position:"absolute",top:e.top,left:e.left,width:this.$root.outerWidth(),height:this.$root.outerHeight(),zIndex:this.getStartZIndex(!0),opacity:0}).appendTo(document.body)}this.disabled=!0}},docClick:function(t){return this.$touchScrollingSub?(this.$touchScrollingSub=null,void 0):((this.visibleSubMenus.length&&!$.contains(this.$root[0],t.target)||$(t.target).closest("a").length)&&this.menuHideAll(),void 0)},docTouchEnd:function(){if(this.lastTouch){if(!(!this.visibleSubMenus.length||void 0!==this.lastTouch.x2&&this.lastTouch.x1!=this.lastTouch.x2||void 0!==this.lastTouch.y2&&this.lastTouch.y1!=this.lastTouch.y2||this.lastTouch.target&&$.contains(this.$root[0],this.lastTouch.target))){this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0);var t=this;this.hideTimeout=setTimeout(function(){t.menuHideAll()},350)}this.lastTouch=null}},docTouchMove:function(t){if(this.lastTouch){var e=t.originalEvent.touches[0];this.lastTouch.x2=e.pageX,this.lastTouch.y2=e.pageY}},docTouchStart:function(t){var e=t.originalEvent.touches[0];this.lastTouch={x1:e.pageX,y1:e.pageY,target:e.target}},enable:function(){this.disabled&&(this.$disableOverlay&&(this.$disableOverlay.remove(),this.$disableOverlay=null),this.disabled=!1)},getClosestMenu:function(t){for(var e=$(t).closest("ul");e.dataSM("in-mega");)e=e.parent().closest("ul");return e[0]||null},getHeight:function(t){return this.getOffset(t,!0)},getOffset:function(t,e){var i;"none"==t.css("display")&&(i={position:t[0].style.position,visibility:t[0].style.visibility},t.css({position:"absolute",visibility:"hidden"}).show());var s=t[0].getBoundingClientRect&&t[0].getBoundingClientRect(),o=s&&(e?s.height||s.bottom-s.top:s.width||s.right-s.left);return o||0===o||(o=e?t[0].offsetHeight:t[0].offsetWidth),i&&t.hide().css(i),o},getStartZIndex:function(t){var e=parseInt(this[t?"$root":"$firstSub"].css("z-index"));return!t&&isNaN(e)&&(e=parseInt(this.$root.css("z-index"))),isNaN(e)?1:e},getTouchPoint:function(t){return t.touches&&t.touches[0]||t.changedTouches&&t.changedTouches[0]||t},getViewport:function(t){var e=t?"Height":"Width",i=document.documentElement["client"+e],s=window["inner"+e];return s&&(i=Math.min(i,s)),i},getViewportHeight:function(){return this.getViewport(!0)},getViewportWidth:function(){return this.getViewport()},getWidth:function(t){return this.getOffset(t)},handleEvents:function(){return!this.disabled&&this.isCSSOn()},handleItemEvents:function(t){return this.handleEvents()&&!this.isLinkInMegaMenu(t)},isCollapsible:function(){return"static"==this.$firstSub.css("position")},isCSSOn:function(){return"inline"!=this.$firstLink.css("display")},isFixed:function(){var t="fixed"==this.$root.css("position");return t||this.$root.parentsUntil("body").each(function(){return"fixed"==$(this).css("position")?(t=!0,!1):void 0}),t},isLinkInMegaMenu:function(t){return $(this.getClosestMenu(t[0])).hasClass("mega-menu")},isTouchMode:function(){return!mouse||this.opts.noMouseOver||this.isCollapsible()},itemActivate:function(t,e){var i=t.closest("ul"),s=i.dataSM("level");if(s>1&&(!this.activatedItems[s-2]||this.activatedItems[s-2][0]!=i.dataSM("parent-a")[0])){var o=this;$(i.parentsUntil("[data-smartmenus-id]","ul").get().reverse()).add(i).each(function(){o.itemActivate($(this).dataSM("parent-a"))})}if((!this.isCollapsible()||e)&&this.menuHideSubMenus(this.activatedItems[s-1]&&this.activatedItems[s-1][0]==t[0]?s:s-1),this.activatedItems[s-1]=t,this.$root.triggerHandler("activate.smapi",t[0])!==!1){var 
a=t.dataSM("sub");a&&(this.isTouchMode()||!this.opts.showOnClick||this.clickActivated)&&this.menuShow(a)}},itemBlur:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&this.$root.triggerHandler("blur.smapi",e[0])},itemClick:function(t){var e=$(t.currentTarget);if(this.handleItemEvents(e)){if(this.$touchScrollingSub&&this.$touchScrollingSub[0]==e.closest("ul")[0])return this.$touchScrollingSub=null,t.stopPropagation(),!1;if(this.$root.triggerHandler("click.smapi",e[0])===!1)return!1;var i=$(t.target).is(".sub-arrow"),s=e.dataSM("sub"),o=s?2==s.dataSM("level"):!1,a=this.isCollapsible(),n=/toggle$/.test(this.opts.collapsibleBehavior),r=/link$/.test(this.opts.collapsibleBehavior),h=/^accordion/.test(this.opts.collapsibleBehavior);if(s&&!s.is(":visible")){if((!r||!a||i)&&(this.opts.showOnClick&&o&&(this.clickActivated=!0),this.itemActivate(e,h),s.is(":visible")))return this.focusActivated=!0,!1}else if(a&&(n||i))return this.itemActivate(e,h),this.menuHide(s),n&&(this.focusActivated=!1),!1;return this.opts.showOnClick&&o||e.hasClass("disabled")||this.$root.triggerHandler("select.smapi",e[0])===!1?!1:void 0}},itemDown:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&e.dataSM("mousedown",!0)},itemEnter:function(t){var e=$(t.currentTarget);if(this.handleItemEvents(e)){if(!this.isTouchMode()){this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0);var i=this;this.showTimeout=setTimeout(function(){i.itemActivate(e)},this.opts.showOnClick&&1==e.closest("ul").dataSM("level")?1:this.opts.showTimeout)}this.$root.triggerHandler("mouseenter.smapi",e[0])}},itemFocus:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&(!this.focusActivated||this.isTouchMode()&&e.dataSM("mousedown")||this.activatedItems.length&&this.activatedItems[this.activatedItems.length-1][0]==e[0]||this.itemActivate(e,!0),this.$root.triggerHandler("focus.smapi",e[0]))},itemLeave:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&(this.isTouchMode()||(e[0].blur(),this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0)),e.removeDataSM("mousedown"),this.$root.triggerHandler("mouseleave.smapi",e[0]))},menuHide:function(t){if(this.$root.triggerHandler("beforehide.smapi",t[0])!==!1&&(canAnimate&&t.stop(!0,!0),"none"!=t.css("display"))){var e=function(){t.css("z-index","")};this.isCollapsible()?canAnimate&&this.opts.collapsibleHideFunction?this.opts.collapsibleHideFunction.call(this,t,e):t.hide(this.opts.collapsibleHideDuration,e):canAnimate&&this.opts.hideFunction?this.opts.hideFunction.call(this,t,e):t.hide(this.opts.hideDuration,e),t.dataSM("scroll")&&(this.menuScrollStop(t),t.css({"touch-action":"","-ms-touch-action":"","-webkit-transform":"",transform:""}).off(".smartmenus_scroll").removeDataSM("scroll").dataSM("scroll-arrows").hide()),t.dataSM("parent-a").removeClass("highlighted").attr("aria-expanded","false"),t.attr({"aria-expanded":"false","aria-hidden":"true"});var i=t.dataSM("level");this.activatedItems.splice(i-1,1),this.visibleSubMenus.splice($.inArray(t,this.visibleSubMenus),1),this.$root.triggerHandler("hide.smapi",t[0])}},menuHideAll:function(){this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0);for(var 
t=this.opts.isPopup?1:0,e=this.visibleSubMenus.length-1;e>=t;e--)this.menuHide(this.visibleSubMenus[e]);this.opts.isPopup&&(canAnimate&&this.$root.stop(!0,!0),this.$root.is(":visible")&&(canAnimate&&this.opts.hideFunction?this.opts.hideFunction.call(this,this.$root):this.$root.hide(this.opts.hideDuration))),this.activatedItems=[],this.visibleSubMenus=[],this.clickActivated=!1,this.focusActivated=!1,this.zIndexInc=0,this.$root.triggerHandler("hideAll.smapi")},menuHideSubMenus:function(t){for(var e=this.activatedItems.length-1;e>=t;e--){var i=this.activatedItems[e].dataSM("sub");i&&this.menuHide(i)}},menuInit:function(t){if(!t.dataSM("in-mega")){t.hasClass("mega-menu")&&t.find("ul").dataSM("in-mega",!0);for(var e=2,i=t[0];(i=i.parentNode.parentNode)!=this.$root[0];)e++;var s=t.prevAll("a").eq(-1);s.length||(s=t.prevAll().find("a").eq(-1)),s.addClass("has-submenu").dataSM("sub",t),t.dataSM("parent-a",s).dataSM("level",e).parent().dataSM("sub",t);var o=s.attr("id")||this.accessIdPrefix+ ++this.idInc,a=t.attr("id")||this.accessIdPrefix+ ++this.idInc;s.attr({id:o,"aria-haspopup":"true","aria-controls":a,"aria-expanded":"false"}),t.attr({id:a,role:"group","aria-hidden":"true","aria-labelledby":o,"aria-expanded":"false"}),this.opts.subIndicators&&s[this.opts.subIndicatorsPos](this.$subArrow.clone())}},menuPosition:function(t){var e,i,s=t.dataSM("parent-a"),o=s.closest("li"),a=o.parent(),n=t.dataSM("level"),r=this.getWidth(t),h=this.getHeight(t),u=s.offset(),l=u.left,c=u.top,d=this.getWidth(s),m=this.getHeight(s),p=$(window),f=p.scrollLeft(),v=p.scrollTop(),b=this.getViewportWidth(),S=this.getViewportHeight(),g=a.parent().is("[data-sm-horizontal-sub]")||2==n&&!a.hasClass("sm-vertical"),M=this.opts.rightToLeftSubMenus&&!o.is("[data-sm-reverse]")||!this.opts.rightToLeftSubMenus&&o.is("[data-sm-reverse]"),w=2==n?this.opts.mainMenuSubOffsetX:this.opts.subMenusSubOffsetX,T=2==n?this.opts.mainMenuSubOffsetY:this.opts.subMenusSubOffsetY;if(g?(e=M?d-r-w:w,i=this.opts.bottomToTopSubMenus?-h-T:m+T):(e=M?w-r:d-w,i=this.opts.bottomToTopSubMenus?m-T-h:T),this.opts.keepInViewport){var y=l+e,I=c+i;if(M&&f>y?e=g?f-y+e:d-w:!M&&y+r>f+b&&(e=g?f+b-r-y+e:w-r),g||(S>h&&I+h>v+S?i+=v+S-h-I:(h>=S||v>I)&&(i+=v-I)),g&&(I+h>v+S+.49||v>I)||!g&&h>S+.49){var x=this;t.dataSM("scroll-arrows")||t.dataSM("scroll-arrows",$([$('')[0],$('')[0]]).on({mouseenter:function(){t.dataSM("scroll").up=$(this).hasClass("scroll-up"),x.menuScroll(t)},mouseleave:function(e){x.menuScrollStop(t),x.menuScrollOut(t,e)},"mousewheel DOMMouseScroll":function(t){t.preventDefault()}}).insertAfter(t));var A=".smartmenus_scroll";if(t.dataSM("scroll",{y:this.cssTransforms3d?0:i-m,step:1,itemH:m,subH:h,arrowDownH:this.getHeight(t.dataSM("scroll-arrows").eq(1))}).on(getEventsNS({mouseover:function(e){x.menuScrollOver(t,e)},mouseout:function(e){x.menuScrollOut(t,e)},"mousewheel DOMMouseScroll":function(e){x.menuScrollMousewheel(t,e)}},A)).dataSM("scroll-arrows").css({top:"auto",left:"0",marginLeft:e+(parseInt(t.css("border-left-width"))||0),width:r-(parseInt(t.css("border-left-width"))||0)-(parseInt(t.css("border-right-width"))||0),zIndex:t.css("z-index")}).eq(g&&this.opts.bottomToTopSubMenus?0:1).show(),this.isFixed()){var C={};C[touchEvents?"touchstart touchmove touchend":"pointerdown pointermove pointerup MSPointerDown MSPointerMove MSPointerUp"]=function(e){x.menuScrollTouch(t,e)},t.css({"touch-action":"none","-ms-touch-action":"none"}).on(getEventsNS(C,A))}}}t.css({top:"auto",left:"0",marginLeft:e,marginTop:i-m})},menuScroll:function(t,e,i){var 
s,o=t.dataSM("scroll"),a=t.dataSM("scroll-arrows"),n=o.up?o.upEnd:o.downEnd;if(!e&&o.momentum){if(o.momentum*=.92,s=o.momentum,.5>s)return this.menuScrollStop(t),void 0}else s=i||(e||!this.opts.scrollAccelerate?this.opts.scrollStep:Math.floor(o.step));var r=t.dataSM("level");if(this.activatedItems[r-1]&&this.activatedItems[r-1].dataSM("sub")&&this.activatedItems[r-1].dataSM("sub").is(":visible")&&this.menuHideSubMenus(r-1),o.y=o.up&&o.y>=n||!o.up&&n>=o.y?o.y:Math.abs(n-o.y)>s?o.y+(o.up?s:-s):n,t.css(this.cssTransforms3d?{"-webkit-transform":"translate3d(0, "+o.y+"px, 0)",transform:"translate3d(0, "+o.y+"px, 0)"}:{marginTop:o.y}),mouse&&(o.up&&o.y>o.downEnd||!o.up&&o.y0;t.dataSM("scroll-arrows").eq(i?0:1).is(":visible")&&(t.dataSM("scroll").up=i,this.menuScroll(t,!0))}e.preventDefault()},menuScrollOut:function(t,e){mouse&&(/^scroll-(up|down)/.test((e.relatedTarget||"").className)||(t[0]==e.relatedTarget||$.contains(t[0],e.relatedTarget))&&this.getClosestMenu(e.relatedTarget)==t[0]||t.dataSM("scroll-arrows").css("visibility","hidden"))},menuScrollOver:function(t,e){if(mouse&&!/^scroll-(up|down)/.test(e.target.className)&&this.getClosestMenu(e.target)==t[0]){this.menuScrollRefreshData(t);var i=t.dataSM("scroll"),s=$(window).scrollTop()-t.dataSM("parent-a").offset().top-i.itemH;t.dataSM("scroll-arrows").eq(0).css("margin-top",s).end().eq(1).css("margin-top",s+this.getViewportHeight()-i.arrowDownH).end().css("visibility","visible")}},menuScrollRefreshData:function(t){var e=t.dataSM("scroll"),i=$(window).scrollTop()-t.dataSM("parent-a").offset().top-e.itemH;this.cssTransforms3d&&(i=-(parseFloat(t.css("margin-top"))-i)),$.extend(e,{upEnd:i,downEnd:i+this.getViewportHeight()-e.subH})},menuScrollStop:function(t){return this.scrollTimeout?(cancelAnimationFrame(this.scrollTimeout),this.scrollTimeout=0,t.dataSM("scroll").step=1,!0):void 0},menuScrollTouch:function(t,e){if(e=e.originalEvent,isTouchEvent(e)){var i=this.getTouchPoint(e);if(this.getClosestMenu(i.target)==t[0]){var s=t.dataSM("scroll");if(/(start|down)$/i.test(e.type))this.menuScrollStop(t)?(e.preventDefault(),this.$touchScrollingSub=t):this.$touchScrollingSub=null,this.menuScrollRefreshData(t),$.extend(s,{touchStartY:i.pageY,touchStartTime:e.timeStamp});else if(/move$/i.test(e.type)){var o=void 0!==s.touchY?s.touchY:s.touchStartY;if(void 0!==o&&o!=i.pageY){this.$touchScrollingSub=t;var a=i.pageY>o;void 0!==s.up&&s.up!=a&&$.extend(s,{touchStartY:i.pageY,touchStartTime:e.timeStamp}),$.extend(s,{up:a,touchY:i.pageY}),this.menuScroll(t,!0,Math.abs(i.pageY-o))}e.preventDefault()}else void 0!==s.touchY&&((s.momentum=15*Math.pow(Math.abs(i.pageY-s.touchStartY)/(e.timeStamp-s.touchStartTime),2))&&(this.menuScrollStop(t),this.menuScroll(t),e.preventDefault()),delete s.touchY)}}},menuShow:function(t){if((t.dataSM("beforefirstshowfired")||(t.dataSM("beforefirstshowfired",!0),this.$root.triggerHandler("beforefirstshow.smapi",t[0])!==!1))&&this.$root.triggerHandler("beforeshow.smapi",t[0])!==!1&&(t.dataSM("shown-before",!0),canAnimate&&t.stop(!0,!0),!t.is(":visible"))){var 
e=t.dataSM("parent-a"),i=this.isCollapsible();if((this.opts.keepHighlighted||i)&&e.addClass("highlighted"),i)t.removeClass("sm-nowrap").css({zIndex:"",width:"auto",minWidth:"",maxWidth:"",top:"",left:"",marginLeft:"",marginTop:""});else{if(t.css("z-index",this.zIndexInc=(this.zIndexInc||this.getStartZIndex())+1),(this.opts.subMenusMinWidth||this.opts.subMenusMaxWidth)&&(t.css({width:"auto",minWidth:"",maxWidth:""}).addClass("sm-nowrap"),this.opts.subMenusMinWidth&&t.css("min-width",this.opts.subMenusMinWidth),this.opts.subMenusMaxWidth)){var s=this.getWidth(t);t.css("max-width",this.opts.subMenusMaxWidth),s>this.getWidth(t)&&t.removeClass("sm-nowrap").css("width",this.opts.subMenusMaxWidth)}this.menuPosition(t)}var o=function(){t.css("overflow","")};i?canAnimate&&this.opts.collapsibleShowFunction?this.opts.collapsibleShowFunction.call(this,t,o):t.show(this.opts.collapsibleShowDuration,o):canAnimate&&this.opts.showFunction?this.opts.showFunction.call(this,t,o):t.show(this.opts.showDuration,o),e.attr("aria-expanded","true"),t.attr({"aria-expanded":"true","aria-hidden":"false"}),this.visibleSubMenus.push(t),this.$root.triggerHandler("show.smapi",t[0])}},popupHide:function(t){this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0);var e=this;this.hideTimeout=setTimeout(function(){e.menuHideAll()},t?1:this.opts.hideTimeout)},popupShow:function(t,e){if(!this.opts.isPopup)return alert('SmartMenus jQuery Error:\n\nIf you want to show this menu via the "popupShow" method, set the isPopup:true option.'),void 0;if(this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0),this.$root.dataSM("shown-before",!0),canAnimate&&this.$root.stop(!0,!0),!this.$root.is(":visible")){this.$root.css({left:t,top:e});var i=this,s=function(){i.$root.css("overflow","")};canAnimate&&this.opts.showFunction?this.opts.showFunction.call(this,this.$root,s):this.$root.show(this.opts.showDuration,s),this.visibleSubMenus[0]=this.$root}},refresh:function(){this.destroy(!0),this.init(!0)},rootKeyDown:function(t){if(this.handleEvents())switch(t.keyCode){case 27:var e=this.activatedItems[0];if(e){this.menuHideAll(),e[0].focus();var i=e.dataSM("sub");i&&this.menuHide(i)}break;case 32:var s=$(t.target);if(s.is("a")&&this.handleItemEvents(s)){var i=s.dataSM("sub");i&&!i.is(":visible")&&(this.itemClick({currentTarget:t.target}),t.preventDefault())}}},rootOut:function(t){if(this.handleEvents()&&!this.isTouchMode()&&t.target!=this.$root[0]&&(this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0),!this.opts.showOnClick||!this.opts.hideOnClick)){var e=this;this.hideTimeout=setTimeout(function(){e.menuHideAll()},this.opts.hideTimeout)}},rootOver:function(t){this.handleEvents()&&!this.isTouchMode()&&t.target!=this.$root[0]&&this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0)},winResize:function(t){if(this.handleEvents()){if(!("onorientationchange"in window)||"orientationchange"==t.type){var e=this.isCollapsible();this.wasCollapsible&&e||(this.activatedItems.length&&this.activatedItems[this.activatedItems.length-1][0].blur(),this.menuHideAll()),this.wasCollapsible=e}}else if(this.$disableOverlay){var i=this.$root.offset();this.$disableOverlay.css({top:i.top,left:i.left,width:this.$root.outerWidth(),height:this.$root.outerHeight()})}}}}),$.fn.dataSM=function(t,e){return e?this.data(t+"_smartmenus",e):this.data(t+"_smartmenus")},$.fn.removeDataSM=function(t){return this.removeData(t+"_smartmenus")},$.fn.smartmenus=function(options){if("string"==typeof options){var 
args=arguments,method=options;return Array.prototype.shift.call(args),this.each(function(){var t=$(this).data("smartmenus");t&&t[method]&&t[method].apply(t,args)})}return this.each(function(){var dataOpts=$(this).data("sm-options")||null;if(dataOpts)try{dataOpts=eval("("+dataOpts+")")}catch(e){dataOpts=null,alert('ERROR\n\nSmartMenus jQuery init:\nInvalid "data-sm-options" attribute value syntax.')}new $.SmartMenus(this,$.extend({},$.fn.smartmenus.defaults,options,dataOpts))})},$.fn.smartmenus.defaults={isPopup:!1,mainMenuSubOffsetX:0,mainMenuSubOffsetY:0,subMenusSubOffsetX:0,subMenusSubOffsetY:0,subMenusMinWidth:"10em",subMenusMaxWidth:"20em",subIndicators:!0,subIndicatorsPos:"append",subIndicatorsText:"",scrollStep:30,scrollAccelerate:!0,showTimeout:250,hideTimeout:500,showDuration:0,showFunction:null,hideDuration:0,hideFunction:function(t,e){t.fadeOut(200,e)},collapsibleShowDuration:0,collapsibleShowFunction:function(t,e){t.slideDown(200,e)},collapsibleHideDuration:0,collapsibleHideFunction:function(t,e){t.slideUp(200,e)},showOnClick:!1,hideOnClick:!0,noMouseOver:!1,keepInViewport:!0,keepHighlighted:!0,markCurrentItem:!1,markCurrentTree:!0,rightToLeftSubMenus:!1,bottomToTopSubMenus:!1,collapsibleBehavior:"default"},$}); \ No newline at end of file diff --git a/Jupyter/html/menu.js b/Jupyter/html/menu.js new file mode 100644 index 000000000..0fd1e9901 --- /dev/null +++ b/Jupyter/html/menu.js @@ -0,0 +1,134 @@ +/* + @licstart The following is the entire license notice for the JavaScript code in this file. + + The MIT License (MIT) + + Copyright (C) 1997-2020 by Dimitri van Heesch + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software + and associated documentation files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or + substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + @licend The above is the entire license notice for the JavaScript code in this file + */ +function initMenu(relPath,searchEnabled,serverSide,searchPage,search,treeview) { + function makeTree(data,relPath) { + let result=''; + if ('children' in data) { + result+='
    '; + for (let i in data.children) { + let url; + const link = data.children[i].url; + if (link.substring(0,1)=='^') { + url = link.substring(1); + } else { + url = relPath+link; + } + result+='
  • '+ + data.children[i].text+''+ + makeTree(data.children[i],relPath)+'
  • '; + } + result+='
'; + } + return result; + } + let searchBoxHtml; + if (searchEnabled) { + if (serverSide) { + searchBoxHtml='
'+ + '
'+ + '
 '+ + ''+ + '
'+ + '
'+ + '
'+ + '
'; + } else { + searchBoxHtml='
'+ + ''+ + ' '+ + ''+ + ''+ + ''+ + ''+ + ''+ + '
'; + } + } + + $('#main-nav').before('
'+ + ''+ + ''+ + '
'); + $('#main-nav').append(makeTree(menudata,relPath)); + $('#main-nav').children(':first').addClass('sm sm-dox').attr('id','main-menu'); + if (searchBoxHtml) { + $('#main-menu').append('
  • '); + } + const $mainMenuState = $('#main-menu-state'); + let prevWidth = 0; + if ($mainMenuState.length) { + const initResizableIfExists = function() { + if (typeof initResizable==='function') initResizable(treeview); + } + // animate mobile menu + $mainMenuState.change(function() { + const $menu = $('#main-menu'); + let options = { duration: 250, step: initResizableIfExists }; + if (this.checked) { + options['complete'] = () => $menu.css('display', 'block'); + $menu.hide().slideDown(options); + } else { + options['complete'] = () => $menu.css('display', 'none'); + $menu.show().slideUp(options); + } + }); + // set default menu visibility + const resetState = function() { + const $menu = $('#main-menu'); + const newWidth = $(window).outerWidth(); + if (newWidth!=prevWidth) { + if ($(window).outerWidth()<768) { + $mainMenuState.prop('checked',false); $menu.hide(); + $('#searchBoxPos1').html(searchBoxHtml); + $('#searchBoxPos2').hide(); + } else { + $menu.show(); + $('#searchBoxPos1').empty(); + $('#searchBoxPos2').html(searchBoxHtml); + $('#searchBoxPos2').show(); + } + if (typeof searchBox!=='undefined') { + searchBox.CloseResultsWindow(); + } + prevWidth = newWidth; + } + } + $(window).ready(function() { resetState(); initResizableIfExists(); }); + $(window).resize(resetState); + } + $('#main-menu').smartmenus(); +} +/* @license-end */ diff --git a/Jupyter/html/menudata.js b/Jupyter/html/menudata.js new file mode 100644 index 000000000..eeb0d0ea0 --- /dev/null +++ b/Jupyter/html/menudata.js @@ -0,0 +1,29 @@ +/* + @licstart The following is the entire license notice for the JavaScript code in this file. + + The MIT License (MIT) + + Copyright (C) 1997-2020 by Dimitri van Heesch + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software + and associated documentation files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or + substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + @licend The above is the entire license notice for the JavaScript code in this file +*/ +var menudata={children:[ +{text:"IMP Manual",url:"^https://integrativemodeling.org/nightly/doc/manual/"}, +{text:"Reference Guide",url:"^https://integrativemodeling.org/nightly/doc/ref/"}, +{text:"Tutorial Index",url:"^https://integrativemodeling.org/tutorials/"}, +{text:"Main Page",url:"index.html"}]} diff --git a/Jupyter/html/minus.svg b/Jupyter/html/minus.svg new file mode 100644 index 000000000..f70d0c1a1 --- /dev/null +++ b/Jupyter/html/minus.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/Jupyter/html/minusd.svg b/Jupyter/html/minusd.svg new file mode 100644 index 000000000..5f8e87962 --- /dev/null +++ b/Jupyter/html/minusd.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/Jupyter/html/nav_f.png b/Jupyter/html/nav_f.png new file mode 100644 index 0000000000000000000000000000000000000000..72a58a529ed3a9ed6aa0c51a79cf207e026deee2 GIT binary patch literal 153 zcmeAS@N?(olHy`uVBq!ia0vp^j6iI`!2~2XGqLUlQVE_ejv*C{Z|{2ZH7M}7UYxc) zn!W8uqtnIQ>_z8U literal 0 HcmV?d00001 diff --git a/Jupyter/html/nav_fd.png b/Jupyter/html/nav_fd.png new file mode 100644 index 0000000000000000000000000000000000000000..032fbdd4c54f54fa9a2e6423b94ef4b2ebdfaceb GIT binary patch literal 169 zcmeAS@N?(olHy`uVBq!ia0vp^j6iI`!2~2XGqLUlQU#tajv*C{Z|C~*H7f|XvG1G8 zt7aS*L7xwMeS}!z6R#{C5tIw-s~AJ==F^i}x3XyJseHR@yF& zerFf(Zf;Dd{+(0lDIROL@Sj-Ju2JQ8&-n%4%q?>|^bShc&lR?}7HeMo@BDl5N(aHY Uj$gdr1MOz;boFyt=akR{0D!zeaR2}S literal 0 HcmV?d00001 diff --git a/Jupyter/html/nav_g.png b/Jupyter/html/nav_g.png new file mode 100644 index 0000000000000000000000000000000000000000..2093a237a94f6c83e19ec6e5fd42f7ddabdafa81 GIT binary patch literal 95 zcmeAS@N?(olHy`uVBq!ia0vp^j6lrB!3HFm1ilyoDK$?Q$B+ufw|5PB85lU25BhtE tr?otc=hd~V+ws&_A@j8Fiv!KF$B+ufw|5=67#uj90@pIL wZ=Q8~_Ju`#59=RjDrmm`tMD@M=!-l18IR?&vFVdQ&MBb@0HFXL6W-eg#Jd_@e6*DPn)w;=|1H}Zvm9l6xXXB%>yL=NQU;mg M>FVdQ&MBb@0Bdt1Qvd(} literal 0 HcmV?d00001 diff --git a/Jupyter/html/navtree.css b/Jupyter/html/navtree.css new file mode 100644 index 000000000..69211d4a7 --- /dev/null +++ b/Jupyter/html/navtree.css @@ -0,0 +1,149 @@ +#nav-tree .children_ul { + margin:0; + padding:4px; +} + +#nav-tree ul { + list-style:none outside none; + margin:0px; + padding:0px; +} + +#nav-tree li { + white-space:nowrap; + margin:0px; + padding:0px; +} + +#nav-tree .plus { + margin:0px; +} + +#nav-tree .selected { + background-image: url('tab_a.png'); + background-repeat:repeat-x; + color: var(--nav-text-active-color); + text-shadow: var(--nav-text-active-shadow); +} + +#nav-tree .selected .arrow { + color: var(--nav-arrow-selected-color); + text-shadow: none; +} + +#nav-tree img { + margin:0px; + padding:0px; + border:0px; + vertical-align: middle; +} + +#nav-tree a { + text-decoration:none; + padding:0px; + margin:0px; +} + +#nav-tree .label { + margin:0px; + padding:0px; + font: 12px var(--font-family-nav); +} + +#nav-tree .label a { + padding:2px; +} + +#nav-tree .selected a { + text-decoration:none; + color:var(--nav-text-active-color); +} + +#nav-tree .children_ul { + margin:0px; + padding:0px; +} + +#nav-tree .item { + margin:0px; + padding:0px; +} + +#nav-tree { + padding: 0px 0px; + font-size:14px; + overflow:auto; +} + +#doc-content { + overflow:auto; + display:block; + padding:0px; + margin:0px; + -webkit-overflow-scrolling : touch; /* iOS 5+ */ +} + +#side-nav { + padding:0 6px 0 0; + margin: 0px; + display:block; + position: absolute; + left: 0px; + width: $width; + overflow : hidden; +} + +.ui-resizable 
.ui-resizable-handle { + display:block; +} + +.ui-resizable-e { + background-image:var(--nav-splitbar-image); + background-size:100%; + background-repeat:repeat-y; + background-attachment: scroll; + cursor:ew-resize; + height:100%; + right:0; + top:0; + width:6px; +} + +.ui-resizable-handle { + display:none; + font-size:0.1px; + position:absolute; + z-index:1; +} + +#nav-tree-contents { + margin: 6px 0px 0px 0px; +} + +#nav-tree { + background-repeat:repeat-x; + background-color: var(--nav-background-color); + -webkit-overflow-scrolling : touch; /* iOS 5+ */ +} + +#nav-sync { + position:absolute; + top:5px; + right:24px; + z-index:0; +} + +#nav-sync img { + opacity:0.3; +} + +#nav-sync img:hover { + opacity:0.9; +} + +@media print +{ + #nav-tree { display: none; } + div.ui-resizable-handle { display: none; position: relative; } +} + diff --git a/Jupyter/html/notebook.html b/Jupyter/html/notebook.html new file mode 100644 index 000000000..197d8aeaa --- /dev/null +++ b/Jupyter/html/notebook.html @@ -0,0 +1,1010 @@ + + + + + + + + +IMP Tutorial: IMP spatiotemporal tutorial + + + + + + + + + + + + + + +
    + + + + + +
    + IMP logo + +
    +
    +
    +
    +
    +
    +
    + + + + + + +
    +
    IMP Tutorial +
    +
    +
    + + + + + + + + +
    +
    +
    +
    IMP spatiotemporal tutorial
    +
    +
    +

    +

    +Introduction

    +

    Biomolecules are constantly in motion; therefore, a complete depiction of their function must include their dynamics instead of just static structures. We have developed an integrative spatiotemporal approach to model dynamic systems.

    +

    Our approach applies a composite workflow, consisting of three modeling problems to compute (i) heterogeneity models, (ii) snapshot models, and (iii) trajectory models. Heterogeneity models describe the possible biomolecular compositions of the system at each time point. Optionally, other auxiliary variables can be considered, such as the coarse location in the final state when modeling an assembly process. For each heterogeneity model, one snapshot model is produced. A snapshot model is a set of alternative standard static integrative structure models based on the information available for the corresponding time point. Then, trajectory models are created by connecting alternative snapshot models at adjacent time points. These trajectory models can be scored based on both the scores of static structures and the transitions between them, allowing for the creation of trajectories that are in agreement with the input information by construction.

    +

    If you use this tutorial or its accompanying method, please cite the corresponding publications:

    +
      +
    • Latham, A.P.; Tempkin, J.O.B.; Otsuka, S.; Zhang, W.; Ellenberg, J.; Sali, A. bioRxiv, 2024, https://doi.org/10.1101/2024.08.06.606842.
    • +
    • Latham, A.P.; Rožič, M.; Webb, B.M.; Sali, A. In preparation (tutorial).
    • +
    +

    +Integrative spatiotemporal modeling workflow

    +

    In general, integrative modeling proceeds through three steps (i. gathering information; ii. choosing the model representation, scoring alternative models, and searching for good scoring models; and iii. assessing the models). In integrative spatiotemporal modeling, these three steps are repeated for each modeling problem in the composite workflow (i. modeling of heterogeneity, ii. modeling of snapshots, and iii. modeling of a trajectory).

    +

    +

    This tutorial will walk you through the creation of a spatiotemporal model for the hypothetical assembly mechanism of the Bmi1/Ring1b-UbcH5c complex. We note that all experimental data used in this study, other than the static structure, are purely hypothetical; thus, the model should not be interpreted as informative about the actual assembly mechanism of the complex.

    +

    Finally, this notebook presents an abbreviated version of this protocol, with the computationally expensive steps excluded. A more complete version of this tutorial is available as a series of Python scripts at https://github.com/salilab/imp_spatiotemporal_tutorial.

    +

    +Modeling of heterogeneity

    +

    Here, we describe the first modeling problem in our composite workflow: how to build heterogeneity models using IMP. In this tutorial, heterogeneity modeling only includes protein copy number; however, in general, other types of information, such as the coarse location in the final state, could also be included in heterogeneity models.

    +

    +Heterogeneity modeling step 1: gathering of information

    +

We begin heterogeneity modeling with the first step of integrative modeling, gathering information. Heterogeneity modeling will rely on copy number information about the complex. In this case, we utilize the X-ray crystal structure of the fully assembled Bmi1/Ring1b-UbcH5c complex from the protein data bank (PDB), and synthetically generated protein copy numbers during the assembly process, which could be generated from experiments such as fluorescence correlation spectroscopy (FCS).

    +

    +

    The PDB structure of the complex informs the final state of our model and constrains the maximum copy number for each protein, while the protein copy number data gives time-dependent information about the protein copy number in the assembling complex.

    +

    +Heterogeneity modeling step 2: representation, scoring function, and search process

    +

Next, we represent, score, and search for heterogeneity models. A single heterogeneity model is a set of protein copy numbers, scored according to its fit to the experimental copy number data at that time point. As ET and SAXS data are only available at 0 minutes, 1 minute, and 2 minutes, we choose to create heterogeneity models at these three time points. We then use prepare_protein_library to calculate the protein copy numbers for each snapshot model and to use the topology file of the full complex (spatiotemporal_topology.txt) to generate a topology file for each of these snapshot models. The choices made in this topology file are important for the representation, scoring function, and search process for snapshot models, and are discussed later. For heterogeneity modeling, we choose to model 3 protein copy numbers at each time point, and restrict the final time point to have the same protein copy numbers as the PDB structure.

    +
    # General imports for the tutorial
    +
    import sys, os, glob, shutil
    +
    import IMP
    +
    import RMF
    +
    import IMP.rmf
    +
    from IMP.spatiotemporal import prepare_protein_library
    +
    import IMP.spatiotemporal as spatiotemporal
    +
    from IMP.spatiotemporal import analysis
    +
    import numpy as np
    +
    import matplotlib.pyplot as plt
    + + +
    # parameters for prepare_protein_library:
    +
    times = ["0min", "1min", "2min"]
    +
    exp_comp = {'A': '../modeling/Input_Information/gen_FCS/exp_compA.csv',
    +
    'B': '../modeling/Input_Information/gen_FCS/exp_compB.csv',
    +
    'C': '../modeling/Input_Information/gen_FCS/exp_compC.csv'}
    +
    expected_subcomplexes = ['A', 'B', 'C']
    +
    template_topology = '../modeling/Heterogeneity/Heterogeneity_Modeling/spatiotemporal_topology.txt'
    +
    template_dict = {'A': ['Ubi-E2-D3'], 'B': ['BMI-1'], 'C': ['E3-ubi-RING2']}
    +
    nmodels = 3
    +
    +
    # calling prepare_protein_library
    +
    prepare_protein_library.prepare_protein_library(times, exp_comp, expected_subcomplexes, nmodels,
    +
    template_topology=template_topology, template_dict=template_dict)
    +

    From the output of prepare_protein_library, we see that there are 3 heterogeneity models at each time point (it is possible to have more snapshot models than copy numbers if multiple copies of the protein exist in the complex). For each heterogeneity model, we see 2 files:

      +
    • *.config, a file with a list of proteins represented in the heterogeneity model
    • +
    • *_topol.txt, a topology file for snapshot modeling corresponding to this heterogeneity model.
    • +
    +
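As a quick check, we can list the files generated for each time point. The short sketch below is illustrative; the file-name patterns are assumed from the description above.

import glob
for time in ["0min", "1min", "2min"]:
    # heterogeneity models and their snapshot topology files for this time point
    configs = sorted(glob.glob(f"*_{time}.config"))
    topols = sorted(glob.glob(f"*_{time}_topol.txt"))
    print(time, configs, topols)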

    +Heterogeneity modeling step 3: assessment

    +

    Now, we have a variety of heterogeneity models. In general, there are four ways to assess a model: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model. Here, we will focus specifically on comparing the model to experimental data, as other assessments will be performed later, when the trajectory models are assessed.

    +

    Next, we can plot the modeled and experimental copy numbers simultaneously for each protein, as shown below for proteins A (a), B (b), and C (c).

    +
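A minimal sketch of how such a plot could be produced is shown below. The layout of the exp_comp{prot}.csv files (time in the first column, mean copy number in the second, with one header row) and the location of the .config files are assumptions, so adjust the paths and columns to your data.

import numpy as np
import matplotlib.pyplot as plt

times = ["0min", "1min", "2min"]
state_dict = {'0min': 3, '1min': 3, '2min': 1}

for prot in ['A', 'B', 'C']:
    # experimental copy number (assumed layout: time in column 0, mean copy number in column 1)
    exp = np.loadtxt(f'../modeling/Input_Information/gen_FCS/exp_comp{prot}.csv',
                     delimiter=',', skiprows=1)
    plt.plot(exp[:, 0], exp[:, 1], color='gray', label='experiment')
    # modeled copy number: count how often the protein appears in each {state}_{time}.config
    for time in times:
        t = float(time.rstrip('min'))
        for state in range(1, state_dict[time] + 1):
            with open(f'{state}_{time}.config') as fh:
                cn = sum(1 for line in fh if prot in line)
            plt.scatter(t, cn, color='tab:blue')
    plt.xlabel('time (min)')
    plt.ylabel(f'protein {prot} copy number')
    plt.legend()
    plt.savefig(f'copy_number_{prot}.png')
    plt.close()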

    +

    From these plots, we observe that the range of possible experimental copy numbers are well sampled by the heterogeneity models, indicating that we are prepared for snapshot modeling.

    +

    +Modeling of snapshots

    +

Here, we describe the second modeling problem in our composite workflow: how to build static snapshot models using IMP. We note that this process is similar to previous tutorials of actin and RNA PolII.

    +

    +Snapshot modeling step 1: gathering of information

    +

    We begin snapshot modeling with the first step of integrative modeling, gathering information. Snapshot modeling utilizes structural information about the complex. In this case, we utilize heterogeneity models, the X-ray crystal structure of the fully assembled Bmi1/Ring1b-UbcH5c complex from the protein data bank (PDB), synthetically generated electron tomography (ET) density maps during the assembly process, and physical principles.

    +

    +

The heterogeneity models inform protein copy numbers for the snapshot models. The PDB structure of the complex informs the structure of the individual proteins. The time-dependent ET data informs the size and shape of the assembling complex. Physical principles inform connectivity and excluded volume.

    +

    +Snapshot modeling step 2: representation, scoring function, and search process

    +

    Next, we represent, score and search for snapshot models. This step is quite computationally expensive. Therefore, we will not run the modeling protocol in this notebook, though the scripts are available in modeling/Snapshots/Snapshots_Modeling/. Here, we will simply describe the important steps made by two scripts. The first, static_snapshot.py, uses IMP to represent, score, and search for a single static snapshot model. The second, start_sim.py, automates the creation of a snapshot model for each heterogeneity model.

    +

    +Modeling one snapshot

    +

    Here, we will describe the process of modeling a single snapshot model, as performed by running static_snapshot.py.

    +

    +Representing the model

    +

    We begin by representing the data and the model. In general, the representation of a system is defined by all the variables that need to be determined.

    +

    For our model of a protein complex, we use a combination of two representations. The first is a series of spherical beads, which can correspond to portions of the biomolecules of interest, such as atoms or groups of atoms. The second is a series of 3D Gaussians, which help calculate the overlap between our model and the density from ET data.

    +

    Beads and Gaussians in our model belong to either a rigid body or flexible string. The positions of all beads and Gaussians in a single rigid body are constrained during sampling and do not move relative to each other. Meanwhile, flexible beads can move freely during sampling, but are restrained by sequence connectivity.

    +

To begin, we built a topology file with the representation for the model of the complete system, spatiotemporal_topology.txt, located in Heterogeneity/Heterogeneity_Modeling/. This complete topology was used as a template to build topologies of each heterogeneity model. Based on our observation of the structure of the complex, we chose to represent each protein with at least 2 separate rigid bodies, and left the first 28 residues of protein C as flexible beads. Rigid bodies were described with 1 bead for every residue, and 10 residues per Gaussian. Flexible beads were described with 1 bead for every residue and 1 residue per Gaussian. A more complete description of the options available in topology files is available in the TopologyReader documentation.

    +
|molecule_name | color | fasta_fn | fasta_id | pdb_fn | chain | residue_range | pdb_offset | bead_size | em_residues_per_gaussian | rigid_body | super_rigid_body | chain_of_super_rigid_bodies |
|Ubi-E2-D3|blue|3rpg.fasta.txt|Ubi-E2-D3|3rpg.pdb|A|-1,18|2|1|10|1|1||
|Ubi-E2-D3|blue|3rpg.fasta.txt|Ubi-E2-D3|3rpg.pdb|A|19,147|2|1|10|2|1||
|BMI-1|red|3rpg.fasta.txt|BMI-1|3rpg.pdb|B|3,83|-2|1|10|3|2||
|BMI-1|red|3rpg.fasta.txt|BMI-1|3rpg.pdb|B|84,101|-2|1|10|4|2||
|E3-ubi-RING2|green|3rpg.fasta.txt|E3-ubi-RING2|BEADS|C|16,44|-15|1|1|5|3||
|E3-ubi-RING2|green|3rpg.fasta.txt|E3-ubi-RING2|3rpg.pdb|C|45,116|-15|1|10|6|3||

    Next, we must prepare static_snapshot.py to read in this topology file. We begin by defining the input variables, state and time, which define which topology to use, as well as the paths to other pieces of input information.

    +
state = sys.argv[1]
time = sys.argv[2]
    +
    +
    +
    fasta_dir = "../../../../Input_Information/FASTA"
    +
    +

Next, we build the system, using the topology file described above.

    +bs.add_state(t)

    Then, we prepare for later sampling steps by setting which Monte Carlo moves will be performed. Rotation (rot) and translation (trans) parameters are separately set for super rigid bodies (srb), rigid bodies (rb), and beads (bead).

    +
    max_rb_rot=0.5, max_bead_trans=2.0,
    +
    max_srb_trans=1.0, max_srb_rot=0.5)
    +
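For reference, a condensed sketch of this setup is shown below. The variable names (mdl, pdb_dir, gmm_dir, topology_file), the directory paths, and the max_rb_trans value are assumptions made for illustration; the full, authoritative version is static_snapshot.py.

import IMP
import IMP.pmi.topology
import IMP.pmi.macros

# assumed paths and file names for this sketch
pdb_dir = "../../../../Input_Information/PDB"
gmm_dir = "../../../../Input_Information/gen_ET"
topology_file = f"{state}_{time}_topol.txt"

mdl = IMP.Model()
# read the snapshot-specific topology file and build the system from it
t = IMP.pmi.topology.TopologyReader(topology_file,
                                    pdb_dir=pdb_dir,
                                    fasta_dir=fasta_dir,
                                    gmm_dir=gmm_dir)
bs = IMP.pmi.macros.BuildSystem(mdl)
bs.add_state(t)
# execute_macro returns the root hierarchy and the degrees of freedom used during sampling
root_hier, dof = bs.execute_macro(max_rb_trans=1.0, max_rb_rot=0.5,
                                  max_bead_trans=2.0,
                                  max_srb_trans=1.0, max_srb_rot=0.5)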

    +Scoring the model

    +

    After building the model representation, we choose a scoring function to score the model based on input information. This scoring function is represented as a series of restraints that serve as priors.

    +

    +Connectivity

    +

    We begin with a connectivity restraint, which restrains beads adjacent in sequence to be close in 3D space.

    +
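The corresponding code is not excerpted above, so here is a minimal sketch of the standard IMP.pmi connectivity setup, applied to every molecule built by BuildSystem (variable names assumed):

import IMP.pmi.restraints.stereochemistry

output_objects = []  # restraints to be reported in the stat files
for mol_name, copies in bs.get_molecules()[0].items():
    for mol in copies:
        cr = IMP.pmi.restraints.stereochemistry.ConnectivityRestraint(mol)
        cr.add_to_model()
        output_objects.append(cr)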

    +Excluded volume

    +

    Next is an excluded volume restraint, which restrains beads to minimize their spatial overlap.

    +
# excluded volume restraint over the full hierarchy, evaluated at a coarse resolution
# (the ExcludedVolumeSphere constructor is the standard IMP.pmi call implied by the arguments below)
evr = IMP.pmi.restraints.stereochemistry.ExcludedVolumeSphere(
                                             included_objects=[root_hier],
                                             resolution=1000)
output_objects.append(evr)
evr.add_to_model()
    +

    +Electron tomography

    +

    Finally, we restrain our models based on their fit to ET density maps. Both the experimental map and the forward protein density are represented as Gaussian mixture models (GMMs) to speed up scoring. The score is based on the log of the correlation coefficient between the experimental density and the forward protein density.

    +
# select the Gaussian densities of the model and restrain them to the time-dependent ET map;
# the Selection/GaussianEMRestraint calls are the standard IMP.pmi pattern implied by the
# arguments below, and em_map holds the path to the GMM of the experimental density
import IMP.pmi.restraints.em

densities = IMP.atom.Selection(root_hier,
                               representation_type=IMP.atom.DENSITIES).get_selected_particles()
emr = IMP.pmi.restraints.em.GaussianEMRestraint(densities,
                                                target_fn=em_map,
                                                slope=0.000001,
                                                scale_target_to_mass=True,
                                                weight=1000)
output_objects.append(emr)
emr.add_to_model()
    +

    +Searching for good scoring models

    +

    After building a scoring function that scores alternative models based on their fit to the input information, we aim to search for good scoring models. For complicated systems, stochastic sampling techniques such as Monte Carlo (MC) sampling are often the most efficient way to compute good scoring models. Here, we generate a random initial configuration and then perform temperature replica exchange MC sampling with 16 temperatures from different initial configurations. By performing multiple runs of replica exchange MC from different initial configurations, we can later ensure that our sampling is sufficiently converged.

    +
# randomize the initial configuration, then run temperature replica exchange Monte Carlo sampling;
# the shuffle_configuration and ReplicaExchange calls are the standard IMP.pmi pattern implied
# by the arguments below ('mdl' is the IMP.Model built earlier in the script)
IMP.pmi.tools.shuffle_configuration(root_hier,
                                    max_translation=50)
rex = IMP.pmi.macros.ReplicaExchange(mdl,
                                     root_hier=root_hier,
                                     monte_carlo_sample_objects=dof.get_movers(),
                                     global_output_directory='output', # name 'output' is the best for imp sampcon select_good
                                     output_objects=output_objects,
                                     monte_carlo_steps=200, # Number of MC steps between writing frames.
                                     number_of_best_scoring_models=0,
                                     number_of_frames=500) # number of frames to be saved
rex.execute_macro()
    +
    +

After performing sampling, a variety of outputs will be created. These outputs include .rmf files, which contain multi-resolution models output by IMP, and .out files, which contain a variety of information about the run, such as the values of the restraints and the MC acceptance rate.

    +
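For example, a sampling run can be inspected quickly with a few lines like the following (the path assumes the default IMP.pmi layout, with one RMF per replica inside the output directory):

import RMF
rmf_file = 'output/rmfs/0.rmf3'  # assumed location of the RMF written by replica 0
rh = RMF.open_rmf_file_read_only(rmf_file)
print(f'{rmf_file} contains {rh.get_number_of_frames()} frames')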

    +Generalizing modeling to all snapshots

    +

    Next, we will describe the process of computing multiple static snapshot models, as performed by running start_sim.py.

    +

    From heterogeneity modeling, we see that there are 3 heterogeneity models at each time point (it is possible to have more snapshot models than copy numbers if multiple copies of the protein exist in the complex), each of which has a corresponding topology file in Heterogeneity/Heterogeneity_Modeling/. We wrote a function, generate_all_snapshots, which creates a directory for each snapshot model, copies the python script and topology file into that directory, and submits a job script to run sampling. The job script will likely need to be customized for the user's computer or cluster.

    +
    +
    +
    main_dir = os.getcwd()
    +
    topol_dir = os.path.join(os.getcwd(), '../../Heterogeneity/Heterogeneity_Modeling')
    +
    items_to_copy = ['static_snapshot.py'] # additionally we need to copy only specific topology file
    +
    +
    "#$ -l h_rt=48:00:00\n\nmodule load Sali\nmodule load imp\nmodule load mpi/openmpi-x86_64\n\n"
    +
    "mpirun -np $NSLOTS python3 static_snapshot.py {state} {time}")
    +
    number_of_runs = 50
    +
    +
    +

    +Snapshot modeling step 3: assessment

    +

The above code would produce a variety of alternative snapshot models. In general, we would like to assess these models in at least 4 ways: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model. In this portion of the tutorial, we focus specifically on estimating the sampling precision of the model, while quantitative comparisons between the model and experimental data will be reserved for the final step, when we assess trajectories. Again, this assessment process is quite computationally intensive, so, instead of running the script explicitly, we will walk you through the snapshot_assessment.py script, which is located in the modeling/Snapshots/Snapshots_Assessment folder.

    +

    +Filtering good scoring models

    +

Initially, we want to filter the various alternative structural models to select only those that meet certain parameter thresholds. In this case, we filter the structural models comprising each snapshot model by the median cross correlation with EM data. We note that this filtering criterion is subjective, and developing a Bayesian method to objectively weigh different restraints for filtering remains an interesting future development in integrative modeling.

    +

    The current filtering procedure involves three steps. In the first step, we look through the stat.*.out files to write out the cross correlation with EM data for each model, which, in this case, is labeled column 3, GaussianEMRestraint_None_CCC. In other applications, the column that corresponds to each type of experimental data may change, depending on the scoring terms for each model. For each snapshot model, a new file is written with this data ({state}_{time}_stat.txt).

    +
    +
    +
    +
    runs_nr = 50
    +
    replica_nr = 16
    +
    replica_output_name = 'output'
    +
    decimals_nr = 16
    +
    +
    extracting_stat_files(state_dict, runs_nr, replica_nr, replica_output_name, keys_to_extract, decimals_nr)
    +
    print("extracting_stat_files is DONE")
    +
    print("")
    +
    print("")
    +

    In the second step, we want to determine the median value of EM cross correlation for each snapshot model. We wrote general_rule_calculation to look through the general_rule_column for each {state}_{time}_stat.txt file and determine both the median value and the number of structures generated.

    +
    +
    +
    general_rule_calculation(state_dict, general_rule_column)
    +
    +
    print("general_rule_calculation is DONE")
    +
    print("")
    +
    print("")
    +
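Conceptually, the median computed by general_rule_calculation for a single snapshot model amounts to the sketch below, assuming the extracted CCC values form the last column of the {state}_{time}_stat.txt file:

import numpy as np
# load the extracted stat values for snapshot model 1 at 0 min (column layout assumed)
data = np.loadtxt('1_0min_stat.txt', ndmin=2)
ccc = data[:, -1]
print('number of structural models:', len(ccc))
print('median EM cross correlation:', np.median(ccc))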

    In the third step, we use the imp_sampcon select_good tool to filter each snapshot model, according to the median value determined in the previous step. For each snapshot model, this function produces a file, good_scoring_models/model_ids_scores.txt, which contains the run, replicaID, scores, and sampleID for each model that passes filtering. It also saves RMF files with each model from two independent groups of sampling runs from each snapshot model to good_scoring_models/sample_A and good_scoring_models/sample_B, writes the scores for the two independent groups of sampling runs to good_scoring_models/scoresA.txt and good_scoring_models/scoresB.txt, and writes good_scoring_models/model_sample_ids.txt to connect each model to its division of sampling run. More information on imp_sampcon is available in the analysis portion of the actin tutorial.

    +
    +
    print("general_rule_filter_independent_samples is DONE")
    +
    print("")
    +
    print("")
    +

    +Plotting data, clustering models, and determining sampling precision

    +

Next, scores can be plotted for analysis. Here, we wrote the create_histograms function to run imp_sampcon plot_score so that it plots distributions for various scores of interest. Each of these plots is saved to histograms{state}_{time}/{score}.png, where score is an object listed in the score_list. These plots are useful for debugging the modeling protocol, and should appear roughly Gaussian.

    +
    +
    'Total_Score',
    +
    'ConnectivityRestraint_Score',
    +
    'ExcludedVolumeSphere_Score',
    +
    'GaussianEMRestraint_None',
    +
    'GaussianEMRestraint_None_CCC'
    +
    ] # list of histograms we want to create in each histograms{state}_{time} directory
    +
    +
    create_histograms(state_dict, main_dir, score_list)
    +
    print("create_histograms is DONE")
    +
    print("")
    +
    print("")
    +

We then check the number of models in each sampling run through our function, count_rows_and_generate_report, which writes the independent_samples_stat.txt file. Empirically, we have found that ensuring the overall number of models in each independent sample after filtering is roughly equal serves as a good first check on sampling convergence.

    +
    +
    print("count_rows_and_generate_report is DONE")
    +
    print("")
    +
    print("")
    +
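The underlying check is simple; a sketch of the idea, using the directory layout from the filtering step above, is:

state_dict = {'0min': 3, '1min': 3, '2min': 1}
for time in state_dict:
    for state in range(1, state_dict[time] + 1):
        base = f'snapshot{state}_{time}/good_scoring_models'
        # count the filtered models in each independent sample for this snapshot model
        with open(f'{base}/scoresA.txt') as fa, open(f'{base}/scoresB.txt') as fb:
            n_a = sum(1 for _ in fa)
            n_b = sum(1 for _ in fb)
        print(f'{state}_{time}: {n_a} models in sample A, {n_b} models in sample B')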

    Next, we write the density range dictionaries, which are output as {state}_{time}_density_ranges.txt. These dictionaries label each protein in each snapshot model, which will be passed into imp_sampcon to calculate the localization density of each protein.

    +
    +
    print("create_density_dictionary is DONE")
    +
    print("")
    +
    print("")
    +
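For this system, a plausible density-range dictionary for the 2 min snapshot model might look like the following (the variable name follows the imp_sampcon convention and is an assumption here):

# hypothetical content of 1_2min_density_ranges.txt: each key names a localization density
# to compute, and each value lists the selections that contribute to it
density_custom_ranges = {"A": ["A"], "B": ["B"], "C": ["C"]}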

Next, we run imp_sampcon exhaust on each snapshot model. This code performs checks on the exhaustiveness of the sampling. Specifically, it analyzes the convergence of the model score, whether the two model sets were drawn from the same distribution, and whether each structural cluster includes models from each sample proportionally to its size. The output for each snapshot model is written out to the exhaust_{state}_{time} folder.

    +
    +
    print("exhaust is DONE")
    +
    print("")
    +
    print("")
    +

    Plots for determining the sampling precision are shown below for a single snapshot model, 1_2min. (a) Tests the convergence of the lowest scoring model (snapshot_{state}_{time}.Top_Score_Conv.pdf). Error bars represent standard deviations of the best scores, estimated by selecting different subsets of models 10 times. The light-blue line indicates a lower bound reference on the total score. (b) Tests that the scores of two independently sampled models come from the same distribution (snapshot_{state}_{time}.Score_Dist.pdf). The difference between the two distributions, as measured by the KS test statistic (D) and KS test p-value (p) indicates that the difference is both statistically insignificant (p>0.05) and small in magnitude (D<0.3). (c) Determines the structural precision of a snapshot model (snapshot_{state}_{time}.ChiSquare.pdf). RMSD clustering is performed at 1 Å intervals until the clustered population (% clustered) is greater than 80%, and either the χ2 p-value is greater than 0.05 or Cramer’s V is less than 0.1. The sampling precision is indicated by the dashed black line. (d) Populations from sample 1 and sample 2 are shown for each cluster (snapshot_{state}_{time}.Cluster_Population.pdf).

    +

    +

Further structural analysis can be calculated by using the cluster.* files. The cluster.*.{sample}.txt files contain the model number for the models in that cluster, where {sample} indicates which round of sampling the models came from. The cluster.* folder contains an RMF for the centroid model of that cluster, along with the localization densities for each protein. The localization densities of each protein from each independent sampling can be compared to ensure independent samplings produce the same results.

    +

    Ideally, each of these plots should be checked for each snapshot model. As a way to summarize the output of these checks, we can gather the results of the KS test and the sampling precision test for all snapshot models. This is done by running extract_exhaust_data and save_exhaust_data_as_png, which write KS_sampling_precision_output.txt and KS_sampling_precision_output.png, respectively.

    +
    +
    print("extract_exhaust_data is DONE")
    +
    print("")
    +
    print("")
    +
    +
    +
    print("save_exhaust_data_as_png is DONE")
    +
    print("")
    +
    print("")
    +

These scripts write a table that includes the KS two-sample test statistic (D), the KS test p-value, and the sampling precision for each snapshot model, which is replotted below.

    +

    +

    +Visualizing models

    +

    The resulting RMF files and localization densities from this analysis can be viewed in UCSF Chimera (version>=1.13) or UCSF ChimeraX.

    +

    Here, we plotted each centroid model (A - blue, B - orange, and C - purple) from the most populated cluster for each snapshot model and compared that model to the experimental EM profile (gray).

    +

    +

    Finally, now that snapshot models were assessed, we can perform modeling of a trajectory.

    +

    +Modeling of a Trajectory

    +

Here, we describe the final modeling problem in our composite workflow: how to build trajectory models using IMP.

    +

    +Trajectory modeling step 1: gathering of information

    +

We begin trajectory modeling with the first step of integrative modeling, gathering information. Trajectory modeling utilizes dynamic information about the biomolecular process. In this case, we utilize heterogeneity models, snapshot models, physical theories, and synthetically generated small-angle X-ray scattering (SAXS) profiles.

    +

    +

    Heterogeneity models inform the possible compositional states at each time point and measure how well a compositional state agrees with input information. Snapshot models provide structural models for each heterogeneity model and measure how well those structural models agree with input information about their structure. Physical theories of macromolecular dynamics inform transitions between states. SAXS data informs the size and shape of the assembling complex and is left for validation.

    +

    +Trajectory modeling step 2: representation, scoring function, and search process

    +

    Trajectory modeling connects alternative snapshot models at adjacent time points, followed by scoring the trajectory models based on their fit to the input information, as described in full here.

    +

    +Background behind integrative spatiotemporal modeling

    +

    +Representing the model

    +

We choose to represent dynamic processes as a trajectory of snapshot models, with one snapshot model at each time point. In this case, we computed snapshot models at 3 time points (0, 1, and 2 minutes), so a single trajectory model will consist of 3 snapshot models, one each at 0, 1, and 2 minutes. The modeling procedure described here will produce a set of scored trajectory models, which can be displayed as a directed acyclic graph, where nodes in the graph represent the snapshot models and edges represent connections between snapshot models at neighboring time points.

    +

    +Scoring the model

    +

    To score trajectory models, we incorporate both the scores of individual snapshot models, as well as the scores of transitions between them. Under the assumption that the process is Markovian (i.e. memoryless), the weight of a trajectory model takes the form:

    +

    $$ W(\chi) \propto \displaystyle\prod^{T}_{t=0} P( X_{N,t}, N_{t} | D_{t}) \cdot \displaystyle\prod^{T-1}_{t=0} W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1}), $$

    +

where $t$ indexes times from 0 until the final modeled snapshot ($T$); $P(X_{N,t}, N_{t} | D_{t})$ is the snapshot model score; and $W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1})$ is the transition score. Trajectory model weights ($W(\chi)$) are normalized so that the sum of all trajectory models' weights is 1.0. Transition scores are currently based on a simple metric that either allows or disallows a transition. Transitions are only allowed if all proteins in the first snapshot model are included in the second snapshot model. In the future, we hope to include more detailed transition scoring terms, which may take into account experimental information or physical models of macromolecular dynamics.

    +

    +Searching for good scoring models

    +

    Trajectory models are constructed by enumerating all connections between adjacent snapshot models and scoring these trajectory models according to the equation above. This procedure results in a set of weighted trajectory models.

    +
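To make this construction concrete, the toy sketch below (hypothetical weights and compositions, not the IMP.spatiotemporal implementation) enumerates all trajectories over three time points, discards those that violate the transition rule, and normalizes the products of snapshot weights:

import itertools

times = ['0min', '1min', '2min']
# hypothetical normalized snapshot weights and protein compositions for each state
snapshots = {'0min': {'1': (0.5, {'A'}), '2': (0.3, {'B'}), '3': (0.2, {'C'})},
             '1min': {'1': (0.6, {'A', 'B'}), '2': (0.3, {'A', 'C'}), '3': (0.1, {'B', 'C'})},
             '2min': {'1': (1.0, {'A', 'B', 'C'})}}

weights = {}
for path in itertools.product(*(snapshots[t].keys() for t in times)):
    w = 1.0
    allowed = True
    for i, t in enumerate(times):
        prob, comp = snapshots[t][path[i]]
        w *= prob
        # transition rule: every protein present at the earlier time point must remain present
        if i > 0 and not snapshots[times[i - 1]][path[i - 1]][1] <= comp:
            allowed = False
    if allowed:
        label = '|'.join(f'{s}_{t}' for s, t in zip(path, times)) + '|'
        weights[label] = w

# normalize so that the weights of all trajectory models sum to 1
total = sum(weights.values())
weights = {k: v / total for k, v in weights.items()}
print(max(weights, key=weights.get), 'is the most likely trajectory model')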

    +Computing trajectory models

    +

    To compute trajectory models, we first copy all necessary files to a new directory, data. These files are (i) {state}_{time}.config files, which include the subcomplexes that are in each state, (ii) {state}_{time}_scores.log, which is a list of all scores of all structural models in that snapshot model, and (iii) exp_comp{prot}.csv, which is the experimental copy number for each protein ({prot}) as a function of time. Here, we copy files related to the snapshots (*.log files) from the modeling directory, as we skipped computing snapshots due to the computational expense.

    +
    def merge_scores(fileA, fileB, outputFile):
    +
    """
    +
Merges scoresA.txt and scoresB.txt into {state}_{time}_scores.log for each snapshot model.
    +
    +
    :param fileA: path to scoresA.txt
    +
    :param fileB: path to scoresB.txt
    +
    :param outputFile: path to output merged .log file named {state}_{time}_scores.log for each snapshot.
    +
This type of .log file is used in create_DAG to generate the trajectory model.
    +
    """
    +
    # open both files, so data can be extracted
    +
    with open(fileA, 'r') as file_a:
    +
    data_a = file_a.readlines()
    +
    +
    with open(fileB, 'r') as file_b:
    +
    data_b = file_b.readlines()
    +
    +
    # Merge the content of both files
    +
    merged_data = data_a + data_b
    +
    +
    # Write the merged content into the output file
    +
    with open(outputFile, 'w') as output:
    +
    output.writelines(merged_data)
    +
    +
    def create_data_and_copy_files(state_dict, custom_source_dir1 = None, custom_source_dir2 = None, custom_source_dir3 = None):
    +
    """
    +
    Copies three types of files important to generate trajectory models:
    +
-.config files created during heterogeneity modeling (source_dir1)
    +
    -time-dependent stoichiometry data for each timepoint. Data should be presented in .csv file. With this function all
    +
    csv file in source_dir2 will be copied. These .csv files will be used in the exp_comp dictionary in create_DAG
    +
    function
    +
    -scoresA and scoresB for each snapshot created with imp sampcon exhaust
    +
    (source_dir1 + snapshot + good_scoring_models) are merged into total score .txt using merge_scores helper function.
    +
    All copied files are gathered in newly created './data/' directory, where everything is prepared for create_DAG
    +
    function.
    +
    +
    +
    :param state_dict (dict): state_dict: dictionary that defines the spatiotemporal model.
    +
    The keys are strings that correspond to each time point in the
    +
    stepwise temporal process. Keys should be ordered according to the
    +
    steps in the spatiotemporal process. The values are integers that
    +
    correspond to the number of possible states at that timepoint.
    +
    :param custom_source_dir1 (optional - str): Custom path to heterogeneity modeling dir (heterogeneity_modeling.py),
    +
    to copy .config files
    +
    :param custom_source_dir2 (optional - str): Custom path to stoichiometry data dir
    +
    :param custom_source_dir3 (optional - str): Custom path to snapshot modeling dir (start_sim.py), to copy .config
    +
    files and to access scoresA/scoresB (custom_source_dir3 + snapshot{state}_{time} + 'good_scoring_models')
    +
    """
    +
    +
# Create the destination directory if it does not exist (./data/). All copied files are gathered here.
    +
    destination_dir = './data/'
    +
    os.makedirs(destination_dir, exist_ok=True)
    +
    +
    # Path to heterogeneity modeling dir
    +
    if custom_source_dir1:
    +
    source_dir1 = custom_source_dir1
    +
    else:
    +
    source_dir1 = '../../Heterogeneity/Heterogeneity_Modeling/'
    +
    +
    # Path to stoichiometry data dir
    +
    if custom_source_dir2:
    +
    source_dir2 = custom_source_dir2
    +
    else:
    +
    source_dir2 = '../../Input_Information/gen_FCS/'
    +
    +
    # Path to snapshot modeling dir
    +
    if custom_source_dir3:
    +
    source_dir3 = custom_source_dir3
    +
    else:
    +
    source_dir3 = '../../Snapshots/Snapshots_Modeling/'
    +
    +
    # Copy all .config files from the first source directory to the destination directory
    +
    try:
    +
    for file_name in os.listdir(source_dir1):
    +
    if file_name.endswith('.config'):
    +
    full_file_name = os.path.join(source_dir1, file_name)
    +
    if os.path.isfile(full_file_name):
    +
    shutil.copy(full_file_name, destination_dir)
    +
    print(".config files are copied")
    +
    except Exception as e:
    +
    print(f".config files cannot be copied. Try do do it manually. Reason for Error: {e}")
    +
    +
    # Copy all .csv stoichiometry files from the second source directory to the destination directory
    +
    try:
    +
    for file_name in os.listdir(source_dir2):
    +
    if file_name.endswith('.csv'):
    +
    full_file_name = os.path.join(source_dir2, file_name)
    +
    if os.path.isfile(full_file_name):
    +
    shutil.copy(full_file_name, destination_dir)
    +
    print(".csv stoichiometry files are copied")
    +
    except Exception as e:
    +
    print(f".csv stoichiometry files cannot be copied. Try do do it manually. Reason for Error: {e}")
    +
    +
    # Copy scoresA and scoresB from the snapshot_{state}_{time} directories and first source directory path
    +
    for time in state_dict.keys():
    +
    for state in range(1, state_dict[time] + 1):
    +
    dir_name = f"snapshot{state}_{time}"
    +
    good_scoring_path = "good_scoring_models"
    +
    file_a = os.path.join(source_dir3, dir_name, good_scoring_path, "scoresA.txt")
    +
    file_b = os.path.join(source_dir3, dir_name, good_scoring_path, "scoresB.txt")
    +
    output_file = os.path.join(destination_dir, f"{state}_{time}_scores.log") # name of the output file
    +
    +
    try:
    +
# Ensure the directory exists before trying to read/write files
    +
    if os.path.exists(file_a) and os.path.exists(file_b):
    +
    merge_scores(file_a, file_b, output_file) # call helper function to merge files
    +
    print(f"Scores for snapshot{state}_{time} have been merged and saved")
    +
else: # many things can go wrong here, so it is good to know where the problem is
    +
    print(f"Path doesn't exist: {source_dir3}")
    +
    print(f"Files not found in directory: {dir_name}")
    +
    print(f"Files not found in directory: {file_a}")
    +
    print(f"Files not found in directory: {file_b}")
    +
    print(f"Output directory doesn't exist: {destination_dir}")
    +
    except Exception as e:
    +
    print(f"total scores files cannot be copied of merged. Reason for Error: {e}")
    +
    +
    # copy all the relevant files for create_DAG
    +
    # it is important that everything starts from main dir
    +
    main_dir = os.getcwd()
    +
    os.chdir(main_dir)
    +
    state_dict = {'0min': 3, '1min': 3, '2min': 1}
    +
    create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../modeling/Input_Information/gen_FCS/', custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')
    +
    +
    # then trajectory model is created based on the all copied data
    +
    expected_subcomplexes = ['A', 'B', 'C']
    +
    exp_comp = {'A': 'exp_compA.csv', 'B': 'exp_compB.csv', 'C': 'exp_compC.csv'}
    +
    input = './data/'
    +
    output = "../output/"
    +

    Next, we compute the spatiotemporal model. The inputs we included are:

      +
    • state_dict (dict): a dictionary that defines the spatiotemporal model. Keys are strings for each time point in the spatiotemporal process and values are integers corresponding to the number of snapshot models computed at that time point
    • +
    • out_pdf (bool): whether to write the probability distribution function (pdf).
    • +
• npaths (int): Number of paths to write to files (path*.txt).
    • +
    • input_dir (str): directory with the input information.
    • +
    • scorestr (str): final characters at the end of the score files.
    • +
    • output_dir (str): directory to which model will be written. Will be created if it does not exist.
    • +
    • spatio_temporal_rule (bool): whether to include our transition scoring term, which enforces that all proteins in the first snapshot model are included in the second snapshot model.
    • +
• expected_subcomplexes (list): list of string objects, which are the subcomplexes to look for when enforcing the spatiotemporal rule. Strings should be substrings of those in {state}_{time}.config files.
    • +
    • score_comp (bool): whether to score the composition of each snapshot model.
    • +
    • exp_comp_map (dictionary): key is a string with the name of each protein that will undergo composition scoring, value is the .csv file with the copy number data for that protein.
    • +
    • draw_dag (bool): whether to write out an image of the directed acyclic graph.
    • +
    +
    nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,
    +
    input_dir=input, scorestr='_scores.log',
    +
    output_dir=output, spatio_temporal_rule=True,
    +
    expected_subcomplexes=expected_subcomplexes,
    +
    score_comp=True, exp_comp_map=exp_comp,
    +
    draw_dag=True)
    +

    After running spatiotemporal.create_DAG, a variety of outputs are written:

      +
    • cdf.txt: the cumulative distribution function for the set of trajectory models.
    • +
    • pdf.txt: the probability distribution function for the set of trajectory models.
    • +
    • labeled_pdf.txt: Each row has 2 columns and represents a different trajectory model. The first column labels a single trajectory model as a series of snapshot models, where each snapshot model is written as {state}_{time}| in sequential order. The second column is the probability distribution function corresponding to that trajectory model.
    • +
    • dag_heatmap.eps and dag_heatmap: image of the directed acyclic graph from the set of models.
    • +
    • path*.txt: files where each row includes a {state}_{time} string, so that rows correspond to the states visited over that trajectory model. Files are numbered from the most likely path to the least likely path.
    • +
    +
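For example, the snapshot models visited along the most likely trajectory model can be read back with a couple of lines (the file name path1.txt follows the numbering described above):

# read the sequence of {state}_{time} labels along the most likely trajectory model
with open('../output/path1.txt') as fh:
    print([line.strip() for line in fh if line.strip()])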

Now that we have a trajectory model, we can plot the directed acyclic graph (left) and the series of centroid models from each snapshot model along the most likely trajectory model (right). Each row corresponds to a different time point in the assembly process (0 min, 1 min, and 2 min). Each node is shaded according to its weight in the final model ($W(X_{N,t},N_{t})$). Proteins are colored as A - blue, B - orange, and C - purple.

    +

    +
    + +
    +

    +Trajectory modeling step 3: assessment

    +

    Now that the set of spatiotemporal models has been constructed, we must evaluate these models. We can evaluate these models in at least 4 ways: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model.

    +

    +Sampling precision

    +

    To begin, we calculate the sampling precision of the models. The sampling precision is calculated by using spatiotemporal.create_DAG to reconstruct the set of trajectory models using 2 independent sets of samplings for snapshot models. Then, the overlap between these snapshot models is evaluated using analysis.temporal_precision, which takes in two labeled_pdf files.

    +

    The temporal precision can take values between 1.0 and 0.0, and indicates the overlap between the two models in trajectory space. Hence, values close to 1.0 indicate a high sampling precision, while values close to 0.0 indicate a low sampling precision. Here, the value close to 1.0 indicates that sampling does not affect the weights of the trajectory models.

    +
    +
    +
    # 1 - copy_files_for_data (copy all relevant files into 'data' directory)
    +
    def copy_files_for_data(state_dict, custom_source_dir1 = None, custom_source_dir2 = None, custom_source_dir3 = None):
    +
    """
    +
    Copies three types of files important to generate trajectory models:
    +
-.config files created during heterogeneity modeling (source_dir1)
    +
    -time-dependent stoichiometry data for each timepoint. Data should be presented in .csv file. With this function all
    +
    csv file in source_dir2 will be copied. These .csv files will be used in the exp_comp dictionary in create_DAG
    +
    function
    +
    -scoresA and scoresB for each snapshot created with imp sampcon exhaust
    +
    (source_dir1 + snapshot + good_scoring_models) are merged into total score .txt using merge_scores helper function.
    +
    All copied files are gathered in newly created './data/' directory, where everything is prepared for create_DAG
    +
    function.
    +
    +
    +
    :param state_dict (dict): state_dict: dictionary that defines the spatiotemporal model.
    +
    The keys are strings that correspond to each time point in the
    +
    stepwise temporal process. Keys should be ordered according to the
    +
    steps in the spatiotemporal process. The values are integers that
    +
    correspond to the number of possible states at that timepoint.
    +
    :param custom_source_dir1 (optional - str): Custom path to heterogeneity modeling dir (heterogeneity_modeling.py),
    +
    to copy .config files
    +
    :param custom_source_dir2 (optional - str): Custom path to stoichiometry data dir
    +
    :param custom_source_dir3 (optional - str): Custom path to snapshot modeling dir (start_sim.py), to copy .config
    +
    files and to access scoresA/scoresB (custom_source_dir3 + snapshot{state}_{time} + 'good_scoring_models')
    +
    """
    +
    # Create the destination directory for all the data copied in this function
    +
    destination_dir = './data/'
    +
    os.makedirs(destination_dir, exist_ok=True)
    +
    +
# path to heterogeneity modeling dir
    +
    if custom_source_dir1:
    +
    source_dir1 = custom_source_dir1
    +
    else:
    +
    source_dir1 = '../../Heterogeneity/Heterogeneity_Modeling/'
    +
    +
    # path to stoichiometry data dir
    +
    if custom_source_dir2:
    +
source_dir2 = custom_source_dir2
    +
    else:
    +
    source_dir2 = '../../Input_Information/gen_FCS/'
    +
    +
    # path to snapshot modeling dir
    +
    if custom_source_dir3:
    +
    source_dir3 = custom_source_dir3
    +
    else:
    +
    source_dir3 = '../../Snapshots/Snapshots_Modeling/'
    +
    +
    # Copy all .config files from the first source directory to the destination directory
    +
    try:
    +
    for file_name in os.listdir(source_dir1):
    +
    if file_name.endswith('.config'):
    +
    full_file_name = os.path.join(source_dir1, file_name)
    +
    if os.path.isfile(full_file_name):
    +
    shutil.copy(full_file_name, destination_dir)
    +
    print(".config files are copied")
    +
    except Exception as e:
    +
    print(f".config files cannot be copied. Try do do it manually. Reason for Error: {e}")
    +
    +
    # Copy all .csv stoichiometry files from the second source directory to the destination directory
    +
    try:
    +
    for file_name in os.listdir(source_dir2):
    +
    if file_name.endswith('.csv'):
    +
    full_file_name = os.path.join(source_dir2, file_name)
    +
    if os.path.isfile(full_file_name):
    +
    shutil.copy(full_file_name, destination_dir)
    +
    print(".csv stoichiometry files are copied")
    +
    except Exception as e:
    +
    print(f".csv stoichiometry files cannot be copied. Try do do it manually. Reason for Error: {e}")
    +
    +
    # Copy scoresA and scoresB from the snapshot_{state}_{time} directories and first source directory path
    +
    try:
    +
    for time in state_dict.keys():
    +
    for state in range(1, state_dict[time] + 1):
    +
    snapshot_dir = os.path.join(source_dir3, f'snapshot{state}_{time}')
    +
    good_scoring_models_dir = os.path.join(snapshot_dir, 'good_scoring_models')
    +
    if os.path.isdir(good_scoring_models_dir):
    +
    for score_file in ['scoresA.txt', 'scoresB.txt']:
    +
    full_file_name = os.path.join(good_scoring_models_dir, score_file)
    +
    if os.path.isfile(full_file_name):
    +
    new_file_name = f'{state}_{time}_{os.path.splitext(score_file)[0]}.log'
    +
    shutil.copy(full_file_name, os.path.join(destination_dir, new_file_name))
    +
    print(f"Copied {full_file_name} to {os.path.join(destination_dir, new_file_name)}")
    +
    except Exception as e:
    +
    print(f"scoresA.txt and scoresB.txt cannot be copied. Try do do it manually. Reason for Error: {e}")
    +
    +
    os.chdir(main_dir)
    +
    # copy all the relevant files
    +
    copy_files_for_data(state_dict, custom_source_dir1='../modeling/Heterogeneity/Heterogeneity_Modeling/',
    +
    custom_source_dir2='../modeling/Input_Information/gen_FCS/',
    +
    custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')
    +
    +
    # create two independent DAGs
    +
    expected_subcomplexes = ['A', 'B', 'C']
    +
    exp_comp = {'A': 'exp_compA.csv', 'B': 'exp_compB.csv', 'C': 'exp_compC.csv'}
    +
    input = "./data/"
    +
    outputA = "../output_modelA/"
    +
    outputB = "../output_modelB/"
    +
    +
    # Output from sampling precision and model precision to be saved in united dir: analysis_output_precision
    +
    analysis_output = "./analysis_output_precision/"
    +
    os.makedirs(analysis_output, exist_ok=True)
    +
    +
    nodesA, graphA, graph_probA, graph_scoresA = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,
    +
    input_dir=input, scorestr='_scoresA.log',
    +
    output_dir=outputA,
    +
    spatio_temporal_rule=True,
    +
    expected_subcomplexes=expected_subcomplexes,
    +
    score_comp=True, exp_comp_map=exp_comp,
    +
    draw_dag=False)
    +
    +
    os.chdir(main_dir)
    +
    nodesB, graphB, graph_probB, graph_scoresB = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,
    +
    input_dir=input, scorestr='_scoresB.log',
    +
    output_dir=outputB,
    +
    spatio_temporal_rule=True,
    +
    expected_subcomplexes=expected_subcomplexes,
    +
    score_comp=True, exp_comp_map=exp_comp,
    +
    draw_dag=False)
    +
    +
    +
    analysis.temporal_precision(outputA + 'labeled_pdf.txt', outputB + 'labeled_pdf.txt',
    +
    output_fn='.' + analysis_output + 'temporal_precision.txt')
    +
    os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main
    +
    print("Step 1: calculation of temporal precision IS COMPLETED")
    +
    print("")
    +
    print("")
    +

    +Model precision

    +

Next, we calculate the precision of the model, using analysis.precision. Here, the model precision quantifies how many trajectory models have high weights. The precision ranges from 1.0 to 1/d, where d is the number of trajectory models. Values approaching 1.0 indicate the model set can be described by a single trajectory model, while values close to 1/d indicate that all trajectory models have similar weights.

    +

    The analysis.precision function reads in the labeled_pdf of the complete model, and calculates the precision of the model. The value close to 1.0 indicates that the set of models can be sufficiently represented by a single trajectory model.

    +
    +
    +
    # precision is calculated from .labeled_pdf.txt in Trajectories_Modeling dir
    +
    trajectories_modeling_input_dir = "./output/"
    +
    +
    analysis.precision(trajectories_modeling_input_dir + 'labeled_pdf.txt', output_fn=analysis_output + 'precision.txt')
    +
    +
    os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main
    +
    print("Step 2: calculation of precision of the model IS COMPLETED")
    +
    print("")
    +
    print("")
    +

    +Comparison against data used in model construction

    +

    We then evaluate the model against data used in model construction. First, we can calculate the cross-correlation between the original EM map and the forward density projected from each snapshot model. This calculation is too computationally expensive for this notebook, but can be found in modeling/Trajectories/Trajectories_Assessment, where we wrote the ccEM function to perform this comparison for all snapshot models.

    +
    +
    ccEM(exp_mrc_base_path)
    +
    print("Step 3a: ET validation IS COMPLETED")
    +
    print("")
    +
    print("")
    +

    The results of this comparison are shown below.

    +

    After comparing the model to EM data, we aimed to compare the model to copy number data, and wrote the forward_model_copy_number function to evaluate the copy numbers from our set of trajectory models. The output of forward_model_copy_number is written in forward_model_copy_number/. The folder contains CN_prot_{prot}.txt files for each protein, which have the mean and standard deviation of protein copy number at each time point. We can then plot these copy numbers from the forward models against those from the experiment, as shown below.

    +
    def read_labeled_pdf(pdf_file):
    +
    """
    +
    Function to read in a labeled probability distribution file output by spatiotemporal.create_DAG.
    +
    Used to determine protein copy numbers by forward_model_copy_number.
    +
:param pdf_file (str): string for the path of the labeled probability distribution file output by
    +
    spatiotemporal.create_DAG.
    +
    :return prob_dict (dict): dictionary defining the spatiotemporal model. Each key is a state, and each value is the
    +
    probability of that state.
    +
    """
    +
# create blank dictionary to store the results
    +
    prob_dict = {}
    +
    # read in labeled pdf file
    +
    old = open(pdf_file, 'r')
    +
    line = old.readline()
    +
    # store the path through various nodes, as well as the probability of that path
    +
    while line:
    +
    line_split = line.split()
    +
    # assumes the first string is the trajectory string, the second string is the probability
    +
    if len(line_split) > 1:
    +
    # use # for comments
    +
    if line_split[0]=='#':
    +
    pass
    +
    else:
    +
    trj = line_split[0]
    +
    prob = float(line_split[1])
    +
    # store in dictionary
    +
    prob_dict[trj] = prob
    +
    line = old.readline()
    +
    old.close()
    +
    return prob_dict
    +
    +
    def copy_number_from_state(prot_list,trj,custom_data_folder = None):
    +
    """
    +
    For a trajectory, returns an array of protein copy numbers as a function of time. Used by
    +
    forward_model_copy_number().
    +
    :param prot_list (list): list of proteins in the model. These proteins are searched for in each config file.
    +
    :param trj (str): string defining a single trajectory.
    +
    :param custom_data_folder (str, optional): path to custom data folder. Defaults to None, which points to '../data/'
    +
    :return _prots (array): 2D array of protein copy numbers. The first index loops over the time,
    +
    while the second index value loops over the protein (ordered as A, B, C).
    +
    :return N (int): Number of time points in each trajectory.
    +
    """
    +
    # find folder with config files
    +
    if custom_data_folder:
    +
    data_folder = custom_data_folder
    +
    else:
    +
    data_folder = 'data/'
    +
    +
    # split the trajectory into a list of individual states
    +
    state_list=trj.split('|')
    +
    state_list=state_list[:-1]
    +
    +
    N = len(state_list)
    +
    # Map from index to protein: 0 - A, 1- B, 2- C
    +
    _prots = np.zeros((N, len(prot_list)))
    +
    +
    # Grab _prots from .config file
    +
    for i in range(0, N):
    +
    prot_file = data_folder + state_list[i] + '.config'
    +
    to_read = open(prot_file, 'r')
    +
    line = to_read.readline()
    +
    while line:
    +
    # for each line, check if the protein is in that line
    +
    for prot_index in range(len(prot_list)):
    +
    if prot_list[prot_index] in line:
    +
    _prots[i, prot_index] += 1
    +
    line = to_read.readline()
    +
    +
    return _prots,N
    +
    +
    def forward_model_copy_number(prot_list,custom_labeled_pdf=None):
    +
    """
    +
    Code to perform copy number analysis on each protein in the model. Writes output files where each row is ordered
    +
    according to the time point in the model and the first column is the mean copy number, while the second column is
    +
    the standard deviation in copy number.
    +
    :param prot_list (list): list of proteins in the model. These proteins are searched for in each config file.
    +
    :param custom_labeled_pdf (str, optional): path to custom labeled probability distribution file output by
    +
    spatiotemporal.create_DAG.
    +
    """
    +
    # find folder with config files
    +
    if custom_labeled_pdf:
    +
    _labeled_pdf = custom_labeled_pdf
    +
    else:
    +
    _labeled_pdf = '../Trajectories_Modeling/output/labeled_pdf.txt'
    +
    +
    # Read in labeled_pdf file into a dictionary. Each trajectory is listed as a dictionary,
    +
    # with keys as the trajectory and the values as the probability of that trajectory
    +
    prob_dict = read_labeled_pdf(_labeled_pdf)
    +
    +
    # Loop over the full dictionary. Create a list with 2 values:
    +
    # 1) the probability of the state, 2) the protein copy number of that state.
    +
    key_list = prob_dict.keys()
    +
    prot_prob = []
    +
    for key in key_list:
    +
    CN,N_times = copy_number_from_state(prot_list,key)
    +
    prot_prob.append([prob_dict[key], CN])
    +
    +
    # Construct the full path to the output directory
    +
    dir_name = "forward_model_copy_number"
    +
    full_path = os.path.join(main_dir, dir_name)
    +
    os.makedirs(full_path, exist_ok=True)
    +
    os.chdir(full_path)
    +
    +
    # Determine copy number from the prot_prob
    +
    for index in range(len(prot_prob[0][1][0])):
    +
    copy_number = np.zeros((N_times, 2))
    +
    # calculate mean
    +
    for state in prot_prob:
    +
    for i in range(N_times):
    +
    copy_number[i, 0] += state[0] * state[1][i][index]
    +
    # calculate std deviation
    +
    for state in prot_prob:
    +
    for i in range(N_times):
    +
    # Calculate variance
    +
    copy_number[i, 1] += state[0] * ((state[1][i][index] - copy_number[i, 0]) ** 2)
    +
    # Take square root to get the standard deviation
    +
    copy_number[:, 1] = np.sqrt(copy_number[:, 1])
    +
    # save to file
    +
    np.savetxt('CN_prot_'+prot_list[index]+'.txt', copy_number, header='mean CN\tstd CN')
    +
    +
    # 3b - comparison of the model to data used in modeling (copy number)
    +
    os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main
    +
    forward_model_copy_number(expected_subcomplexes,custom_labeled_pdf='output/labeled_pdf.txt')
    +
    print("Step 3b: copy number validation IS COMPLETED")
    +
    print("")
    +
    print("")
    +

    Here, we plot the comparison between the experimental data used in model construction and the set of trajectory models. This analysis includes the cross-correlation coefficient between the experimental EM density and the forward density of the set of sufficiently good scoring modeled structures in the highest weighted trajectory model (a), as well as comparisons between experimental and modeled protein copy numbers for proteins A (b), B (c), and C (d). Here, we see the model is in good agreement with the data used to construct it.

    +

    +

    +Validation against data not used in model construction

    +

    Finally, we aim to compare the model to data not used in model construction. Specifically, we reserved SAXS data for model validation. We aimed to compare the forward scattering profile from the centroid structural model of each snapshot model to the experimental profile. To make this comparison, we wrote functions that converted each centroid RMF to a PDB (convert_rmfs), copied the experimental SAXS profiles to the appropriate folder (copy_SAXS_dat_files), and ran FoXS on each PDB to evaluate its agreement to the experimental profile (process_foxs).

    +
    # 4a - SAXS
    +
    """
    +
    Comparing center models of the most dominant cluster for each snapshot (rmfs) to the SAXS data for each time point
    +
    can be done in two steps:
    +
    -converting rmfs to pdb files
    +
    -comparing pdbs of each snapshot to experimental SAXS profile using FoXS
    +
    """
    +
    +
    def convert_rmfs(state_dict, model, custom_path=None):
    +
    """
    +
    The purpose of this function is to automate the conversion of RMF files into PDB files for all the states from
    +
    state_dict. Created PDBs are further used in comparison of SAXS profiles using FoXS. Additionally, they can be
    +
    used for comparison to native PDB if available.
    +
    +
    :param state_dict (dict): dictionary that defines the spatiotemporal model.
    +
    The keys are strings that correspond to each time point in the
    +
    stepwise temporal process. Keys should be ordered according to the
    +
    steps in the spatiotemporal process. The values are integers that
    +
    correspond to the number of possible states at that timepoint.
    +
    :param model (str): An IMP (Integrative Modeling Platform) model object.
    +
    :param custom_path (optional - str): A custom path for the RMF file, allowing for flexibility in file location
    +
(should be compliant with state_dict).
    +
    """
    +
    +
    for time in state_dict.keys():
    +
    for state in range(1, state_dict[time] + 1):
    +
    if custom_path:
    +
    sim_rmf = custom_path # option for custom path
    +
    else:
    +
    sim_rmf = f"../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3"
    +
    +
    pdb_output = f"snapshot{state}_{time}.pdb" # define the output of converted .pdb file
    +
    +
    if os.path.exists(sim_rmf):
    +
    try:
    +
    rmf_fh = RMF.open_rmf_file_read_only(sim_rmf) # open rmf file for reading
    +
    rmf_hierarchy = IMP.rmf.create_hierarchies(rmf_fh, model)[0] # extract 1st hierarchy
    +
    IMP.atom.write_pdb_of_c_alphas(rmf_hierarchy, pdb_output) # write coordinates of CA to .pdb
    +
    print(f"Finishing: snapshot{state}_{time}.pdb")
    +
    except Exception as e:
    +
    print(f"{sim_rmf} is empty or there is another problem: {e}")
    +
    +
    +
def copy_SAXS_dat_files(custom_src_dir=None):
    """
    Copies all files ending with .dat from the specified directory to the current directory.

    :param custom_src_dir (optional - str): Path to the source directory
    """
    if custom_src_dir:
        src_dir = custom_src_dir
    else:
        src_dir = '../../../Input_Information/gen_SAXS'
    try:
        files = os.listdir(src_dir)  # Get the list of all files in the src_dir directory
        dat_files = [f for f in files if f.endswith('.dat')]  # Filter out files that end with .dat

        # Copy each .dat file to the current directory, so FoXS can be used
        for file_name in dat_files:
            full_file_name = os.path.join(src_dir, file_name)
            if os.path.isfile(full_file_name):
                shutil.copy(full_file_name, os.getcwd())
                # print(f"Copied: {full_file_name} to {main_dir}")

        print("All .dat files have been copied successfully...")

    except Exception as e:
        print(f"An error occurred: {e}")


def process_foxs(state_dict, custom_dat_file=None):
    """
    This function automates the FoXS analysis for all specified time points in a single execution. It processes PDB
    files generated by the convert_rmfs function and uses SAXS data copied with the copy_SAXS function. All of this
    data should be present in the current running directory.
    FoXS tutorial is available here: https://integrativemodeling.org/tutorials/foxs/foxs.html

    :param state_dict (dict): dictionary that defines the spatiotemporal model.
        The keys are strings that correspond to each time point in the
        stepwise temporal process. Keys should be ordered according to the
        steps in the spatiotemporal process. The values are integers that
        correspond to the number of possible states at that timepoint.
    :param custom_dat_file (optional - str): A custom name of SAXS files for each time point (should be
        compliant with state_dict)
    """

    print("...let's proceed to FoXS")

    for time in state_dict.keys():
        try:
            if state_dict[time] > 1:
                # if there is more than one state at this time point, FoXS creates fit.plt and it should be renamed
                if custom_dat_file:
                    dat_file = custom_dat_file
                else:
                    dat_file = f"{time}_exp.dat"

                pdb_files = " ".join([f"snapshot{state}_{time}.pdb" for state in range(1, state_dict[time] + 1)])

                command1 = f"foxs -r -g {pdb_files} {dat_file}"
                # example of how the FoXS command should look:
                # foxs -r -g snapshot1_0min.pdb snapshot2_0min.pdb snapshot3_0min.pdb 0min_exp.dat
                os.system(command1)
                print(f"FoXS for {time} is calculated and ready to create a plot. Nr of states is: {state_dict[time]}")

                command2 = "gnuplot fit.plt"  # create plot from .plt code
                os.system(command2)

                command3 = f"mv fit.plt {time}_FoXS.plt"  # rename .plt so it is not overwritten
                os.system(command3)

                command4 = f"mv fit.png {time}_FoXS.png"  # rename plot so it is not overwritten
                os.system(command4)

                print(f"Plot {time}_FoXS.png is created")

            elif state_dict[time] == 1:
                print(f"There is only one state in {time}")
                dat_file1 = f"{time}_exp.dat"
                pdb_file1 = f"snapshot1_{time}.pdb"

                command5 = f"foxs -r -g {pdb_file1} {dat_file1}"
                os.system(command5)
                print(f"FoXS for {time} is calculated and ready to create a plot. Nr of states is: {state_dict[time]}")

                command6 = f"gnuplot snapshot1_{time}_{time}_exp.plt"
                os.system(command6)

                command7 = f"mv snapshot1_{time}_{time}_exp.plt {time}_FoXS.plt"
                os.system(command7)

                command8 = f"mv snapshot1_{time}_{time}_exp.png {time}_FoXS.png"
                os.system(command8)
            else:
                print("There are no states at this time point. Check state_dict.")

        except Exception as e:
            print(f"FoXS cannot be executed properly due to the following problem: {e}")


# 4a - SAXS
os.chdir(main_dir)  # it is crucial that after each step, directory is changed back to main
SAXS_output = "./SAXS_comparison/"
os.makedirs(SAXS_output, exist_ok=True)
os.chdir(SAXS_output)
model = IMP.Model()
convert_rmfs(state_dict, model)
copy_SAXS_dat_files(custom_src_dir='../../modeling/Input_Information/gen_SAXS')
process_foxs(state_dict)
print("Step 4a: SAXS validation IS COMPLETED")
print("")
print("")

    The output of this analysis is written to SAXS_comparison. Standard FoXS outputs are available for each snapshot model (snapshot{state}_{time}.*). In particular, the .fit files include the forward and experimental profiles side by side, with the $\chi^2$ for the fit. Further, the {time}_FoXS.* files include the information for all snapshot models at that time point, including plots of each profile in comparison to the experimental profile ({time}_FoXS.png).
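
    If you want to replot or further analyze a fit, the .fit files can be parsed directly. The following is a minimal sketch, assuming the usual FoXS .fit layout of '#'-prefixed header lines followed by four columns (q, experimental intensity, experimental error, model intensity); the file names in the usage line are illustrative, so check your own output before relying on it.

\code{.py}
import numpy as np
import matplotlib.pyplot as plt

def plot_fit_file(fit_file, out_png):
    """Plot experimental vs. forward SAXS profiles from a FoXS .fit file."""
    data = np.loadtxt(fit_file, comments='#')   # skip '#' header lines
    q, i_exp, sigma, i_model = data.T           # columns: q, I_exp, error, I_model
    plt.errorbar(q, i_exp, yerr=sigma, fmt='.', markersize=2, label='experiment')
    plt.plot(q, i_model, label='model')
    plt.yscale('log')
    plt.xlabel('q (1/Å)')
    plt.ylabel('Intensity (a.u.)')
    plt.legend()
    plt.savefig(out_png)
    plt.close()

# e.g. plot_fit_file('snapshot1_0min_0min_exp.fit', 'snapshot1_0min_fit_check.png')
\endcode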


    As our model was generated from synthetic data, the ground truth structure is known at each time point. In addition to validating the model by assessing its comparison to SAXS data, we could approximate the model accuracy by comparing each snapshot model to the PDB structure, although this comparison is not perfect because the PDB structure was used to inform the structure of rigid bodies in the snapshot models. To do so, we wrote a function (RMSD) that calculates the RMSD between each structural model and the original PDB. The function is too computationally expensive to run in this notebook, but is found in the Trajectories/Trajectories_Assessment/ folder and is demonstrated below.

pdb_path = "../../Input_Information/PDB/3rpg.pdb"
RMSD(pdb_path=pdb_path, custom_n_plot=20)
print("Step 4b: RMSD validation IS COMPLETED")
print("")
print("")
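
    For intuition about what is being computed, here is a minimal sketch of an RMSD between two sets of matched Cα coordinates using plain NumPy. It is not the tutorial's RMSD function (which also reads the RMFs, maps the chains, and makes the plots described below), and it assumes the two coordinate arrays correspond atom-for-atom and have already been superposed (for example, with the alignment utilities in IMP.atom).

\code{.py}
import numpy as np

def calpha_rmsd(coords_model, coords_ref):
    """RMSD (in Angstroms) between two (N, 3) arrays of matched, superposed CA coordinates."""
    coords_model = np.asarray(coords_model, dtype=float)
    coords_ref = np.asarray(coords_ref, dtype=float)
    assert coords_model.shape == coords_ref.shape
    diff = coords_model - coords_ref
    return float(np.sqrt((diff ** 2).sum(axis=1).mean()))
\endcode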

    The output of this function is written to RMSD_calculation_output. The function outputs rmsd_{state}_{time}.png files, which plot the RMSD for each structural model within each snapshot model. This data is then summarized in RMSD_analysis.txt, which includes the minimum RMSD, average RMSD, and number of structural models in each snapshot model.


    Finally, we plot the results for assessing the spatiotemporal model with data not used to construct it. Comparisons are made between the centroid structure of the most populated cluster in each snapshot model at each time point and the experimental SAXS profile for 0 (a), 1 (b), and 2 (c) minutes. Further, we plot both the sampling precision (dark red) and the RMSD to the PDB structure (light red) for each snapshot model in the highest weighted trajectory model (d).


    To quantitatively compare the model to SAXS data, we used the $\chi^2$ to compare each snapshot model to the experimental profile. We note that the $\chi^2$ values are substantially lower for the models along the highest weighted trajectory model (1_0min, 1_1min, and 1_2min) than for other models, indicating that the highest weighted trajectory model is in better agreement with the experimental SAXS data than other possible trajectory models.
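
    For reference, the fit quality reported by FoXS is of the form $\chi^2 = \frac{1}{M}\sum_{i=1}^{M}\left(\frac{I_{exp}(q_i) - c\,I_{model}(q_i)}{\sigma(q_i)}\right)^2$, where $M$ is the number of points in the experimental profile, $I_{exp}$ and $I_{model}$ are the experimental and computed intensities, $\sigma(q_i)$ is the experimental error, and $c$ is a scale factor fitted by FoXS (an additional constant offset may also be fitted); smaller values therefore indicate better agreement with the experimental profile.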


    Next, we can evaluate the accuracy of the model by comparing the RMSD to the PDB structure with the sampling precision of each snapshot model. We note that this is generally not possible, because in most biological applications the ground truth is not known. In this case, if the average RMSD to the PDB structure is smaller than the sampling precision, the PDB structure lies within the precision of the model. We find that the RMSD is within 1.5 Å of the sampling precision at all time points, indicating that the model lies within 1.5 Å of the ground truth.


    Next steps


    After assessing our model, we must decide if the model is sufficient to answer biological questions of interest. If the model does not have sufficient precision for the desired application, assessment of the current model can be used to inform which new experiments may help improve the next iteration of the model. The integrative spatiotemporal modeling procedure can then be repeated iteratively, analogous to integrative modeling of static structures.


    If the model is sufficient to provide insight into the biological process of interest, the user may decide that it is ready for publication. In this case, the user should create an mmCIF file to deposit the model in the PDB-dev database. This procedure is explained in the deposition tutorial.

    +
    + + + + + diff --git a/Jupyter/html/open.png b/Jupyter/html/open.png new file mode 100644 index 0000000000000000000000000000000000000000..30f75c7efe2dd0c9e956e35b69777a02751f048b GIT binary patch literal 123 zcmeAS@N?(olHy`uVBq!ia0vp^oFL4>1|%O$WD@{VPM$7~Ar*{o?;hlAFyLXmaDC0y znK1_#cQqJWPES%4Uujug^TE?jMft$}Eq^WaR~)%f)vSNs&gek&x%A9X9sM + + + + + + + +IMP Tutorial: Related Pages + + + + + + + + + + + + + + +
    + + + + + +
    + IMP logo + +
    +
    +
    +
    +
    +
    +
    + + + + + + +
    +
    IMP Tutorial +
    +
    +
    + + + + + + + +
    +
    + +
    +
    Related Pages
    +
    +
    +
    Here is a list of all related documentation pages:
    +
    +
    +
    + + + + + diff --git a/Jupyter/html/plus.svg b/Jupyter/html/plus.svg new file mode 100644 index 000000000..075201655 --- /dev/null +++ b/Jupyter/html/plus.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/Jupyter/html/plusd.svg b/Jupyter/html/plusd.svg new file mode 100644 index 000000000..0c65bfe94 --- /dev/null +++ b/Jupyter/html/plusd.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/Jupyter/html/resize.js b/Jupyter/html/resize.js new file mode 100644 index 000000000..7d8cdc7d4 --- /dev/null +++ b/Jupyter/html/resize.js @@ -0,0 +1,145 @@ +/* + @licstart The following is the entire license notice for the JavaScript code in this file. + + The MIT License (MIT) + + Copyright (C) 1997-2020 by Dimitri van Heesch + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software + and associated documentation files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, publish, distribute, + sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or + substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING + BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + @licend The above is the entire license notice for the JavaScript code in this file + */ + +function initResizable(treeview) { + let sidenav,navtree,content,header,footer,barWidth=6; + const RESIZE_COOKIE_NAME = ''+'width'; + + function resizeWidth() { + const sidenavWidth = $(sidenav).outerWidth(); + content.css({marginLeft:parseInt(sidenavWidth)+"px"}); + if (typeof page_layout!=='undefined' && page_layout==1) { + footer.css({marginLeft:parseInt(sidenavWidth)+"px"}); + } + Cookie.writeSetting(RESIZE_COOKIE_NAME,sidenavWidth-barWidth); + } + + function restoreWidth(navWidth) { + content.css({marginLeft:parseInt(navWidth)+barWidth+"px"}); + if (typeof page_layout!=='undefined' && page_layout==1) { + footer.css({marginLeft:parseInt(navWidth)+barWidth+"px"}); + } + sidenav.css({width:navWidth + "px"}); + } + + function resizeHeight(treeview) { + const headerHeight = header.outerHeight(); + const windowHeight = $(window).height(); + let contentHeight; + if (treeview) + { + const footerHeight = footer.outerHeight(); + let navtreeHeight,sideNavHeight; + if (typeof page_layout==='undefined' || page_layout==0) { /* DISABLE_INDEX=NO */ + contentHeight = windowHeight - headerHeight - footerHeight; + navtreeHeight = contentHeight; + sideNavHeight = contentHeight; + } else if (page_layout==1) { /* DISABLE_INDEX=YES */ + contentHeight = windowHeight - footerHeight; + navtreeHeight = windowHeight - headerHeight; + sideNavHeight = windowHeight; + } + navtree.css({height:navtreeHeight + "px"}); + sidenav.css({height:sideNavHeight + "px"}); + } + else + { + contentHeight = windowHeight - headerHeight; + } + content.css({height:contentHeight + "px"}); + if (location.hash.slice(1)) { + 
(document.getElementById(location.hash.slice(1))||document.body).scrollIntoView(); + } + } + + function collapseExpand() { + let newWidth; + if (sidenav.width()>0) { + newWidth=0; + } else { + const width = Cookie.readSetting(RESIZE_COOKIE_NAME,250); + newWidth = (width>250 && width<$(window).width()) ? width : 250; + } + restoreWidth(newWidth); + const sidenavWidth = $(sidenav).outerWidth(); + Cookie.writeSetting(RESIZE_COOKIE_NAME,sidenavWidth-barWidth); + } + + header = $("#top"); + content = $("#doc-content"); + footer = $("#nav-path"); + sidenav = $("#side-nav"); + if (!treeview) { +// title = $("#titlearea"); +// titleH = $(title).height(); +// let animating = false; +// content.on("scroll", function() { +// slideOpts = { duration: 200, +// step: function() { +// contentHeight = $(window).height() - header.outerHeight(); +// content.css({ height : contentHeight + "px" }); +// }, +// done: function() { animating=false; } +// }; +// if (content.scrollTop()>titleH && title.css('display')!='none' && !animating) { +// title.slideUp(slideOpts); +// animating=true; +// } else if (content.scrollTop()<=titleH && title.css('display')=='none' && !animating) { +// title.slideDown(slideOpts); +// animating=true; +// } +// }); + } else { + navtree = $("#nav-tree"); + $(".side-nav-resizable").resizable({resize: function(e, ui) { resizeWidth(); } }); + $(sidenav).resizable({ minWidth: 0 }); + } + $(window).resize(function() { resizeHeight(treeview); }); + if (treeview) + { + const device = navigator.userAgent.toLowerCase(); + const touch_device = device.match(/(iphone|ipod|ipad|android)/); + if (touch_device) { /* wider split bar for touch only devices */ + $(sidenav).css({ paddingRight:'20px' }); + $('.ui-resizable-e').css({ width:'20px' }); + $('#nav-sync').css({ right:'34px' }); + barWidth=20; + } + const width = Cookie.readSetting(RESIZE_COOKIE_NAME,250); + if (width) { restoreWidth(width); } else { resizeWidth(); } + } + resizeHeight(treeview); + const url = location.href; + const i=url.indexOf("#"); + if (i>=0) window.location.hash=url.substr(i); + const _preventDefault = function(evt) { evt.preventDefault(); }; + if (treeview) + { + $("#splitbar").bind("dragstart", _preventDefault).bind("selectstart", _preventDefault); + $(".ui-resizable-handle").dblclick(collapseExpand); + } + $(window).on('load',resizeHeight); +} +/* @license-end */ diff --git a/Jupyter/html/splitbar.png b/Jupyter/html/splitbar.png new file mode 100644 index 0000000000000000000000000000000000000000..fe895f2c58179b471a22d8320b39a4bd7312ec8e GIT binary patch literal 314 zcmeAS@N?(olHy`uVBq!ia0vp^Yzz!63>-{AmhX=Jf(#6djGiuzAr*{o?=JLmPLyc> z_*`QK&+BH@jWrYJ7>r6%keRM@)Qyv8R=enp0jiI>aWlGyB58O zFVR20d+y`K7vDw(hJF3;>dD*3-?v=<8M)@x|EEGLnJsniYK!2U1 Y!`|5biEc?d1`HDhPgg&ebxsLQ02F6;9RL6T literal 0 HcmV?d00001 diff --git a/Jupyter/html/splitbard.png b/Jupyter/html/splitbard.png new file mode 100644 index 0000000000000000000000000000000000000000..8367416d757fd7b6dc4272b6432dc75a75abd068 GIT binary patch literal 282 zcmeAS@N?(olHy`uVBq!ia0vp^Yzz!63>-{AmhX=Jf@VhhFKy35^fiT zT~&lUj3=cDh^%3HDY9k5CEku}PHXNoNC(_$U3XPb&Q*ME25pT;2(*BOgAf<+R$lzakPG`kF31()Fx{L5Wrac|GQzjeE= zueY1`Ze{#x<8=S|`~MgGetGce)#vN&|J{Cd^tS%;tBYTo?+^d68<#n_Y_xx`J||4O V@QB{^CqU0Kc)I$ztaD0e0svEzbJzd? 
literal 0 HcmV?d00001 diff --git a/Jupyter/html/sync_off.png b/Jupyter/html/sync_off.png new file mode 100644 index 0000000000000000000000000000000000000000..3b443fc62892114406e3d399421b2a881b897acc GIT binary patch literal 853 zcmV-b1FHOqP)oT|#XixUYy%lpuf3i8{fX!o zUyDD0jOrAiT^tq>fLSOOABs-#u{dV^F$b{L9&!2=9&RmV;;8s^x&UqB$PCj4FdKbh zoB1WTskPUPu05XzFbA}=KZ-GP1fPpAfSs>6AHb12UlR%-i&uOlTpFNS7{jm@mkU1V zh`nrXr~+^lsV-s1dkZOaI|kYyVj3WBpPCY{n~yd%u%e+d=f%`N0FItMPtdgBb@py; zq@v6NVArhyTC7)ULw-Jy8y42S1~4n(3LkrW8mW(F-4oXUP3E`e#g**YyqI7h-J2zK zK{m9##m4ri!7N>CqQqCcnI3hqo1I;Yh&QLNY4T`*ptiQGozK>FF$!$+84Z`xwmeMh zJ0WT+OH$WYFALEaGj2_l+#DC3t7_S`vHpSivNeFbP6+r50cO8iu)`7i%Z4BTPh@_m3Tk!nAm^)5Bqnr%Ov|Baunj#&RPtRuK& z4RGz|D5HNrW83-#ydk}tVKJrNmyYt-sTxLGlJY5nc&Re zU4SgHNPx8~Yxwr$bsju?4q&%T1874xxzq+_%?h8_ofw~(bld=o3iC)LUNR*BY%c0y zWd_jX{Y8`l%z+ol1$@Qa?Cy!(0CVIEeYpKZ`(9{z>3$CIe;pJDQk$m3p}$>xBm4lb zKo{4S)`wdU9Ba9jJbVJ0C=SOefZe%d$8=2r={nu<_^a3~>c#t_U6dye5)JrR(_a^E f@}b6j1K9lwFJq@>o)+Ry00000NkvXXu0mjfWa5j* literal 0 HcmV?d00001 diff --git a/Jupyter/html/sync_on.png b/Jupyter/html/sync_on.png new file mode 100644 index 0000000000000000000000000000000000000000..e08320fb64e6fa33b573005ed6d8fe294e19db76 GIT binary patch literal 845 zcmV-T1G4;yP)Y;xxyHF2B5Wzm| zOOGupOTn@c(JmBOl)e;XMNnZuiTJP>rM8<|Q`7I_))aP?*T)ow&n59{}X4$3Goat zgjs?*aasfbrokzG5cT4K=uG`E14xZl@z)F={P0Y^?$4t z>v!teRnNZym<6h{7sLyF1V0HsfEl+l6TrZpsfr1}luH~F7L}ktXu|*uVX^RG$L0`K zWs3j|0tIvVe(N%_?2{(iCPFGf#B6Hjy6o&}D$A%W%jfO8_W%ZO#-mh}EM$LMn7joJ z05dHr!5Y92g+31l<%i1(=L1a1pXX+OYnalY>31V4K}BjyRe3)9n#;-cCVRD_IG1fT zOKGeNY8q;TL@K{dj@D^scf&VCs*-Jb>8b>|`b*osv52-!A?BpbYtTQBns5EAU**$m zSnVSm(teh>tQi*S*A>#ySc=n;`BHz`DuG4&g4Kf8lLhca+zvZ7t7RflD6-i-mcK=M z!=^P$*u2)bkY5asG4gsss!Hn%u~>}kIW`vMs%lJLH+u*9<4PaV_c6U`KqWXQH%+Nu zTv41O(^ZVi@qhjQdG!fbZw&y+2o!iYymO^?ud3{P*HdoX83YV*Uu_HB=?U&W9%AU# z80}k1SS-CXTU7dcQlsm<^oYLxVSseqY6NO}dc`Nj?8vrhNuCdm@^{a3AQ_>6myOj+ z`1RsLUXF|dm|3k7s2jD(B{rzE>WI2scH8i1;=O5Cc9xB3^aJk%fQjqsu+kH#0=_5a z0nCE8@dbQa-|YIuUVvG0L_IwHMEhOj$Mj4Uq05 X8=0q~qBNan00000NkvXXu0mjfptF>5 literal 0 HcmV?d00001 diff --git a/Jupyter/html/tab_a.png b/Jupyter/html/tab_a.png new file mode 100644 index 0000000000000000000000000000000000000000..3b725c41c5a527a3a3e40097077d0e206a681247 GIT binary patch literal 142 zcmeAS@N?(olHy`uVBq!ia0vp^j6kfy!2~3aiye;!QlXwMjv*C{Z|8b*H5dputLHD# z=<0|*y7z(Vor?d;H&?EG&cXR}?!j-Lm&u1OOI7AIF5&c)RFE;&p0MYK>*Kl@eiymD r@|NpwKX@^z+;{u_Z~trSBfrMKa%3`zocFjEXaR$#tDnm{r-UW|TZ1%4 literal 0 HcmV?d00001 diff --git a/Jupyter/html/tab_ad.png b/Jupyter/html/tab_ad.png new file mode 100644 index 0000000000000000000000000000000000000000..e34850acfc24be58da6d2fd1ccc6b29cc84fe34d GIT binary patch literal 135 zcmeAS@N?(olHy`uVBq!ia0vp^j6kfy!2~3aiye;!QhuH;jv*C{Z|5d*H3V=pKi{In zd2jxLclDRPylmD}^l7{QOtL{vUjO{-WqItb5sQp2h-99b8^^Scr-=2mblCdZuUm?4 jzOJvgvt3{(cjKLW5(A@0qPS@<&}0TrS3j3^P6y&q2{!U5bk+Tso_B!YCpDh>v z{CM*1U8YvQRyBUHt^Ju0W_sq-?;9@_4equ-bavTs=gk796zopr0EBT&m;e9( literal 0 HcmV?d00001 diff --git a/Jupyter/html/tab_s.png b/Jupyter/html/tab_s.png new file mode 100644 index 0000000000000000000000000000000000000000..ab478c95b67371d700a20869f7de1ddd73522d50 GIT binary patch literal 184 zcmeAS@N?(olHy`uVBq!ia0vp^j6kfy!2~3aiye;!QuUrLjv*C{Z|^p8HaRdjTwH7) zC?wLlL}}I{)n%R&r+1}IGmDnq;&J#%V6)9VsYhS`O^BVBQlxOUep0c$RENLq#g8A$ z)z7%K_bI&n@J+X_=x}fJoEKed-$<>=ZI-;YrdjIl`U`uzuDWSP?o#Dmo{%SgM#oan kX~E1%D-|#H#QbHoIja2U-MgvsK&LQxy85}Sb4q9e0Efg%P5=M^ literal 0 HcmV?d00001 diff --git a/Jupyter/html/tab_sd.png b/Jupyter/html/tab_sd.png new file mode 100644 index 
0000000000000000000000000000000000000000..757a565ced4730f85c833fb2547d8e199ae68f19 GIT binary patch literal 188 zcmeAS@N?(olHy`uVBq!ia0vp^j6kfy!2~3aiye;!Qq7(&jv*C{Z|_!fH5o7*c=%9% zcILh!EA=pAQKdx-Cdiev=v{eg{8Ht<{e8_NAN~b=)%W>-WDCE0PyDHGemi$BoXwcK z{>e9^za6*c1ilttWw&V+U;WCPlV9{LdC~Ey%_H(qj`xgfES(4Yz5jSTZfCt`4E$0YRsR*S^mTCR^;V&sxC8{l_Cp7w8-YPgg&ebxsLQ00$vXK>z>% literal 0 HcmV?d00001 diff --git a/Jupyter/html/tabs.css b/Jupyter/html/tabs.css new file mode 100644 index 000000000..fe4854aa5 --- /dev/null +++ b/Jupyter/html/tabs.css @@ -0,0 +1 @@ +.sm{position:relative;z-index:9999}.sm,.sm ul,.sm li{display:block;list-style:none;margin:0;padding:0;line-height:normal;direction:ltr;text-align:left;-webkit-tap-highlight-color:rgba(0,0,0,0)}.sm-rtl,.sm-rtl ul,.sm-rtl li{direction:rtl;text-align:right}.sm>li>h1,.sm>li>h2,.sm>li>h3,.sm>li>h4,.sm>li>h5,.sm>li>h6{margin:0;padding:0}.sm ul{display:none}.sm li,.sm a{position:relative}.sm a{display:block}.sm a.disabled{cursor:not-allowed}.sm:after{content:"\00a0";display:block;height:0;font:0/0 serif;clear:both;visibility:hidden;overflow:hidden}.sm,.sm *,.sm *:before,.sm *:after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.main-menu-btn{position:relative;display:inline-block;width:36px;height:36px;text-indent:36px;margin-left:8px;white-space:nowrap;overflow:hidden;cursor:pointer;-webkit-tap-highlight-color:rgba(0,0,0,0)}.main-menu-btn-icon,.main-menu-btn-icon:before,.main-menu-btn-icon:after{position:absolute;top:50%;left:2px;height:2px;width:24px;background:var(--nav-menu-button-color);-webkit-transition:all .25s;transition:all .25s}.main-menu-btn-icon:before{content:'';top:-7px;left:0}.main-menu-btn-icon:after{content:'';top:7px;left:0}#main-menu-state:checked ~ .main-menu-btn .main-menu-btn-icon{height:0}#main-menu-state:checked ~ .main-menu-btn .main-menu-btn-icon:before{top:0;-webkit-transform:rotate(-45deg);transform:rotate(-45deg)}#main-menu-state:checked ~ .main-menu-btn .main-menu-btn-icon:after{top:0;-webkit-transform:rotate(45deg);transform:rotate(45deg)}#main-menu-state{position:absolute;width:1px;height:1px;margin:-1px;border:0;padding:0;overflow:hidden;clip:rect(1px,1px,1px,1px)}#main-menu-state:not(:checked) ~ #main-menu{display:none}#main-menu-state:checked ~ #main-menu{display:block}@media(min-width:768px){.main-menu-btn{position:absolute;top:-99999px}#main-menu-state:not(:checked) ~ #main-menu{display:block}}.sm-dox{background-image:var(--nav-gradient-image)}.sm-dox a,.sm-dox a:focus,.sm-dox a:hover,.sm-dox a:active{padding:0 12px;padding-right:43px;font-family:var(--font-family-nav);font-size:13px;font-weight:bold;line-height:36px;text-decoration:none;text-shadow:var(--nav-text-normal-shadow);color:var(--nav-text-normal-color);outline:0}.sm-dox a:hover{background-image:var(--nav-gradient-active-image);background-repeat:repeat-x;color:var(--nav-text-hover-color);text-shadow:var(--nav-text-hover-shadow)}.sm-dox a.current{color:#d23600}.sm-dox a.disabled{color:#bbb}.sm-dox a span.sub-arrow{position:absolute;top:50%;margin-top:-14px;left:auto;right:3px;width:28px;height:28px;overflow:hidden;font:bold 12px/28px monospace !important;text-align:center;text-shadow:none;background:var(--nav-menu-toggle-color);-moz-border-radius:5px;-webkit-border-radius:5px;border-radius:5px}.sm-dox a span.sub-arrow:before{display:block;content:'+'}.sm-dox a.highlighted span.sub-arrow:before{display:block;content:'-'}.sm-dox>li:first-child>a,.sm-dox>li:first-child>:not(ul) a{-moz-border-radius:5px 5px 0 
0;-webkit-border-radius:5px;border-radius:5px 5px 0 0}.sm-dox>li:last-child>a,.sm-dox>li:last-child>*:not(ul) a,.sm-dox>li:last-child>ul,.sm-dox>li:last-child>ul>li:last-child>a,.sm-dox>li:last-child>ul>li:last-child>*:not(ul) a,.sm-dox>li:last-child>ul>li:last-child>ul,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>a,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>*:not(ul) a,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>a,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>*:not(ul) a,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>a,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>*:not(ul) a,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul{-moz-border-radius:0 0 5px 5px;-webkit-border-radius:0;border-radius:0 0 5px 5px}.sm-dox>li:last-child>a.highlighted,.sm-dox>li:last-child>*:not(ul) a.highlighted,.sm-dox>li:last-child>ul>li:last-child>a.highlighted,.sm-dox>li:last-child>ul>li:last-child>*:not(ul) a.highlighted,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>a.highlighted,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>*:not(ul) a.highlighted,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>a.highlighted,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>*:not(ul) a.highlighted,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>a.highlighted,.sm-dox>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>ul>li:last-child>*:not(ul) a.highlighted{-moz-border-radius:0;-webkit-border-radius:0;border-radius:0}.sm-dox ul{background:var(--nav-menu-background-color)}.sm-dox ul a,.sm-dox ul a:focus,.sm-dox ul a:hover,.sm-dox ul a:active{font-size:12px;border-left:8px solid transparent;line-height:36px;text-shadow:none;background-color:var(--nav-menu-background-color);background-image:none}.sm-dox ul a:hover{background-image:var(--nav-gradient-active-image);background-repeat:repeat-x;color:var(--nav-text-hover-color);text-shadow:0 1px 1px black}.sm-dox ul ul a,.sm-dox ul ul a:hover,.sm-dox ul ul a:focus,.sm-dox ul ul a:active{border-left:16px solid transparent}.sm-dox ul ul ul a,.sm-dox ul ul ul a:hover,.sm-dox ul ul ul a:focus,.sm-dox ul ul ul a:active{border-left:24px solid transparent}.sm-dox ul ul ul ul a,.sm-dox ul ul ul ul a:hover,.sm-dox ul ul ul ul a:focus,.sm-dox ul ul ul ul a:active{border-left:32px solid transparent}.sm-dox ul ul ul ul ul a,.sm-dox ul ul ul ul ul a:hover,.sm-dox ul ul ul ul ul a:focus,.sm-dox ul ul ul ul ul a:active{border-left:40px solid transparent}@media(min-width:768px){.sm-dox ul{position:absolute;width:12em}.sm-dox li{float:left}.sm-dox.sm-rtl li{float:right}.sm-dox ul li,.sm-dox.sm-rtl ul li,.sm-dox.sm-vertical li{float:none}.sm-dox a{white-space:nowrap}.sm-dox ul a,.sm-dox.sm-vertical a{white-space:normal}.sm-dox .sm-nowrap>li>a,.sm-dox .sm-nowrap>li>:not(ul) a{white-space:nowrap}.sm-dox{padding:0 10px;background-image:var(--nav-gradient-image);line-height:36px}.sm-dox a span.sub-arrow{top:50%;margin-top:-2px;right:12px;width:0;height:0;border-width:4px;border-style:solid dashed dashed dashed;border-color:var(--nav-text-normal-color) transparent transparent 
transparent;background:transparent;-moz-border-radius:0;-webkit-border-radius:0;border-radius:0}.sm-dox a,.sm-dox a:focus,.sm-dox a:active,.sm-dox a:hover,.sm-dox a.highlighted{padding:0 12px;background-image:var(--nav-separator-image);background-repeat:no-repeat;background-position:right;-moz-border-radius:0 !important;-webkit-border-radius:0;border-radius:0 !important}.sm-dox a:hover{background-image:var(--nav-gradient-active-image);background-repeat:repeat-x;color:var(--nav-text-hover-color);text-shadow:var(--nav-text-hover-shadow)}.sm-dox a:hover span.sub-arrow{border-color:var(--nav-text-hover-color) transparent transparent transparent}.sm-dox a.has-submenu{padding-right:24px}.sm-dox li{border-top:0}.sm-dox>li>ul:before,.sm-dox>li>ul:after{content:'';position:absolute;top:-18px;left:30px;width:0;height:0;overflow:hidden;border-width:9px;border-style:dashed dashed solid dashed;border-color:transparent transparent #bbb transparent}.sm-dox>li>ul:after{top:-16px;left:31px;border-width:8px;border-color:transparent transparent var(--nav-menu-background-color) transparent}.sm-dox ul{border:1px solid #bbb;padding:5px 0;background:var(--nav-menu-background-color);-moz-border-radius:5px !important;-webkit-border-radius:5px;border-radius:5px !important;-moz-box-shadow:0 5px 9px rgba(0,0,0,0.2);-webkit-box-shadow:0 5px 9px rgba(0,0,0,0.2);box-shadow:0 5px 9px rgba(0,0,0,0.2)}.sm-dox ul a span.sub-arrow{right:8px;top:50%;margin-top:-5px;border-width:5px;border-color:transparent transparent transparent var(--nav-menu-foreground-color);border-style:dashed dashed dashed solid}.sm-dox ul a,.sm-dox ul a:hover,.sm-dox ul a:focus,.sm-dox ul a:active,.sm-dox ul a.highlighted{color:var(--nav-menu-foreground-color);background-image:none;border:0 !important}.sm-dox ul a:hover{background-image:var(--nav-gradient-active-image);background-repeat:repeat-x;color:var(--nav-text-hover-color);text-shadow:var(--nav-text-hover-shadow)}.sm-dox ul a:hover span.sub-arrow{border-color:transparent transparent transparent var(--nav-text-hover-color)}.sm-dox span.scroll-up,.sm-dox span.scroll-down{position:absolute;display:none;visibility:hidden;overflow:hidden;background:var(--nav-menu-background-color);height:36px}.sm-dox span.scroll-up:hover,.sm-dox span.scroll-down:hover{background:#eee}.sm-dox span.scroll-up:hover span.scroll-up-arrow,.sm-dox span.scroll-up:hover span.scroll-down-arrow{border-color:transparent transparent #d23600 transparent}.sm-dox span.scroll-down:hover span.scroll-down-arrow{border-color:#d23600 transparent transparent transparent}.sm-dox span.scroll-up-arrow,.sm-dox span.scroll-down-arrow{position:absolute;top:0;left:50%;margin-left:-6px;width:0;height:0;overflow:hidden;border-width:6px;border-style:dashed dashed solid dashed;border-color:transparent transparent var(--nav-menu-foreground-color) transparent}.sm-dox span.scroll-down-arrow{top:8px;border-style:solid dashed dashed dashed;border-color:var(--nav-menu-foreground-color) transparent transparent transparent}.sm-dox.sm-rtl a.has-submenu{padding-right:12px;padding-left:24px}.sm-dox.sm-rtl a span.sub-arrow{right:auto;left:12px}.sm-dox.sm-rtl.sm-vertical a.has-submenu{padding:10px 20px}.sm-dox.sm-rtl.sm-vertical a span.sub-arrow{right:auto;left:8px;border-style:dashed solid dashed dashed;border-color:transparent #555 transparent transparent}.sm-dox.sm-rtl>li>ul:before{left:auto;right:30px}.sm-dox.sm-rtl>li>ul:after{left:auto;right:31px}.sm-dox.sm-rtl ul a.has-submenu{padding:10px 20px !important}.sm-dox.sm-rtl ul a 
span.sub-arrow{right:auto;left:8px;border-style:dashed solid dashed dashed;border-color:transparent #555 transparent transparent}.sm-dox.sm-vertical{padding:10px 0;-moz-border-radius:5px;-webkit-border-radius:5px;border-radius:5px}.sm-dox.sm-vertical a{padding:10px 20px}.sm-dox.sm-vertical a:hover,.sm-dox.sm-vertical a:focus,.sm-dox.sm-vertical a:active,.sm-dox.sm-vertical a.highlighted{background:#fff}.sm-dox.sm-vertical a.disabled{background-image:var(--nav-gradient-image)}.sm-dox.sm-vertical a span.sub-arrow{right:8px;top:50%;margin-top:-5px;border-width:5px;border-style:dashed dashed dashed solid;border-color:transparent transparent transparent #555}.sm-dox.sm-vertical>li>ul:before,.sm-dox.sm-vertical>li>ul:after{display:none}.sm-dox.sm-vertical ul a{padding:10px 20px}.sm-dox.sm-vertical ul a:hover,.sm-dox.sm-vertical ul a:focus,.sm-dox.sm-vertical ul a:active,.sm-dox.sm-vertical ul a.highlighted{background:#eee}.sm-dox.sm-vertical ul a.disabled{background:var(--nav-menu-background-color)}} \ No newline at end of file diff --git a/Jupyter/spatiotemporal-colab.ipynb b/Jupyter/spatiotemporal-colab.ipynb new file mode 100644 index 000000000..693b3c0c4 --- /dev/null +++ b/Jupyter/spatiotemporal-colab.ipynb @@ -0,0 +1,1308 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4a3dfff8-9d60-418c-a5fd-a4adf7a121b1", + "metadata": {}, + "source": [ + "IMP spatiotemporal tutorial\n", + "========\n", + "\n", + "# Introduction\n", + "\n", + "Biomolecules are constantly in motion; therefore, a complete depiction of their function must include their dynamics instead of just static structures. We have developed an integrative spatiotemporal approach to model dynamic systems.\n", + "\n", + "Our approach applies a composite workflow, consisting of three modeling problems to compute (i) heterogeneity models, (ii) snapshot models, and (iii) trajectory models.\n", + "Heterogeneity models describe the possible biomolecular compositions of the system at each time point. Optionally, other auxiliary variables can be considered, such as the coarse location in the final state when modeling an assembly process.\n", + "For each heterogeneity model, one snapshot model is produced. A snapshot model is a set of alternative standard static integrative structure models based on the information available for the corresponding time point.\n", + "Then, trajectory models are created by connecting alternative snapshot models at adjacent time points. These trajectory models can be scored based on both the scores of static structures and the transitions between them, allowing for the creation of trajectories that are in agreement with the input information by construction.\n", + "\n", + "If you use this tutorial or its accompanying method, please site the corresponding publications:\n", + "\n", + "- Latham, A.P.; Tempkin, J.O.B.; Otsuka, S.; Zhang, W.; Ellenberg, J.; Sali, A. bioRxiv, 2024, https://doi.org/10.1101/2024.08.06.606842.\n", + "- Latham, A.P.; Ro\u017ei\u010d, M.; Webb, B.M., Sali, A. in preparation. (tutorial)\n", + "\n", + "# Integrative spatiotemporal modeling workflow\n", + "\n", + "In general, integrative modeling proceeds through three steps (i. gathering information; ii. choosing the model representation, scoring alternative models, and searching for good scoring models; and iii. assessing the models). In integrative spatiotemporal modeling, these three steps are repeated for each modeling problem in the composite workflow (i. modeling of heterogeneity, ii. modeling of snapshots, and iii. 
modeling of a trajectory).\n", + "\n", + "\n", + "\n", + "This tutorial will walk you through the creation of a spatiotemporal model for the hypothetical assembly mechanism of the Bmi1/Ring1b-UbcH5c complex. We note that all experimental data besides the static structure used in this study is purely hypothetical, and, thus, the model should not be interpreted to be meaningful about the actual assembly mechanism of the complex.\n", + "\n", + "Finally, this notebook is intended to present an abbreviated version of this protocol, with the computationally expensive steps excluded. A more complete version of this tutorial can be found as a series of python scripts at https://github.com/salilab/imp_spatiotemporal_tutorial.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "dc9d93b4-3b97-4187-867c-977f8353f4aa", + "metadata": {}, + "source": [ + "Modeling of heterogeneity\n", + "====================================\n", + "\n", + "Here, we describe the first modeling problem in our composite workflow, how to build models of heterogeneity modeling using IMP. In this tutorial, heterogeneity modeling only includes protein copy number; however, in general, other types of information, such as the coarse location in the final state, could also be included in heterogeneity models.\n", + "\n", + "# Heterogeneity modeling step 1: gathering of information\n", + "\n", + "We begin heterogeneity modeling with the first step of integrative modeling, gathering information. Heterogeneity modeling will rely on copy number information about the complex. In this case, we utilize the X-ray crystal structure of the fully assembled Bmi1/Ring1b-UbcH5c complex from the protein data bank (PDB), and synthetically generated protein copy numbers during the assembly process, which could be generated from experiments such as flourescence correlation spectroscopy (FCS).\n", + "\n", + "\n", + "\n", + "The PDB structure of the complex informs the final state of our model and constrains the maximum copy number for each protein, while the protein copy number data gives time-dependent information about the protein copy number in the assembling complex.\n", + "\n", + "# Heterogeneity modeling step 2: representation, scoring function, and search process\n", + "\n", + "Next, we represent, score and search for heterogeneity models models. A single heterogeneity model is a set of protein copy numbers, scored according to its fit to experimental copy number data at that time point. As ET and SAXS data, are only available at 0 minutes, 1 minute, and 2 minutes, we choose to create heterogeneity models at these three time points. We then use `prepare_protein_library`, to calculate the protein copy numbers for each snapshot model and to use the topology file of the full complex (`spatiotemporal_topology.txt`) to generate a topology file for each of these snapshot models. The choices made in this topology file are important for the representation, scoring function, and search process for snapshot models, and are discussed later. 
For heterogeneity modeling, we choose to model 3 protein copy numbers at each time point, and restrict the final time point to have the same protein copy numbers as the PDB structure.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66fae319-6215-4712-835d-b09606974260", + "metadata": {}, + "outputs": [], + "source": [ + "# For colab, we need to install IMP\n", + "!add-apt-repository -y ppa:salilab/ppa\n", + "!apt install imp\n", + "import sys, os, glob\n", + "sys.path.append(os.path.dirname(glob.glob('/usr/lib/python*/dist-packages/IMP')[0]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ff4d3e5-04de-4092-8cfc-2ad3018675da", + "metadata": {}, + "outputs": [], + "source": [ + "# General imports for the tutorial\n", + "import sys, os, glob, shutil\n", + "import IMP\n", + "import RMF\n", + "import IMP.rmf\n", + "from IMP.spatiotemporal import prepare_protein_library\n", + "import IMP.spatiotemporal as spatiotemporal\n", + "from IMP.spatiotemporal import analysis\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b78a015-32e5-4701-8c6f-df14863ba9ce", + "metadata": {}, + "outputs": [], + "source": [ + "# parameters for prepare_protein_library:\n", + "times = [\"0min\", \"1min\", \"2min\"]\n", + "exp_comp = {'A': '../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", + " 'B': '../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", + " 'C': '../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", + "expected_subcomplexes = ['A', 'B', 'C']\n", + "template_topology = '../modeling/Heterogeneity/Heterogeneity_Modeling/spatiotemporal_topology.txt'\n", + "template_dict = {'A': ['Ubi-E2-D3'], 'B': ['BMI-1'], 'C': ['E3-ubi-RING2']}\n", + "nmodels = 3\n", + "\n", + "# calling prepare_protein_library\n", + "prepare_protein_library.prepare_protein_library(times, exp_comp, expected_subcomplexes, nmodels,\n", + " template_topology=template_topology, template_dict=template_dict)" + ] + }, + { + "cell_type": "markdown", + "id": "b56fbe48-da12-412e-ac2e-dca673e04a43", + "metadata": {}, + "source": [ + "From the output of `prepare_protein_library`, we see that there are 3 heterogeneity models at each time point (it is possible to have more snapshot models than copy numbers if multiple copies of the protein exist in the complex). For each heterogeneity model, we see 2 files:\n", + "- *.config, a file with a list of proteins represented in the heterogeneity model\n", + "- *_topol.txt, a topology file for snapshot modeling corresponding to this heterogeneity model.\n", + "\n", + "# Heterogeneity modeling step 3: assessment\n", + "\n", + "Now, we have a variety of heterogeneity models. In general, there are four ways to assess a model: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model. Here, we will focus specifically on comparing the model to experimental data, as other assessments will be performed later, when the trajectory models are assessed.\n", + "\n", + "Next, we can plot the modeled and experimental copy numbers simultaneously for each protein, as shown below for proteins A (a), B (b), and C (c).\n", + "\n", + "\n", + "\n", + "From these plots, we observe that the range of possible experimental copy numbers are well sampled by the heterogeneity models, indicating that we are prepared for snapshot modeling." 
+ ] + }, + { + "cell_type": "markdown", + "id": "496f2687-bb05-4f51-8eab-a1170f6ac5fa", + "metadata": {}, + "source": [ + "Modeling of snapshots\n", + "====================================\n", + "\n", + "Here, we describe the second modeling problem in our composite workflow, how to build models of static snapshot models using IMP. We note that this process is similar to previous tutorials of [actin](https://integrativemodeling.org/tutorials/actin/) and [RNA PolII](https://integrativemodeling.org/tutorials/rnapolii_stalk/).\n", + "\n", + "# Snapshot modeling step 1: gathering of information\n", + "\n", + "We begin snapshot modeling with the first step of integrative modeling, gathering information. Snapshot modeling utilizes structural information about the complex. In this case, we utilize heterogeneity models, the X-ray crystal structure of the fully assembled Bmi1/Ring1b-UbcH5c complex from the protein data bank (PDB), synthetically generated electron tomography (ET) density maps during the assembly process, and physical principles.\n", + "\n", + "\n", + "\n", + "The heterogeneity models inform protein copy numbers for the snapshot models. The PDB structure of the complex informs the structure of the individual proteins. The time-dependent ET data informs the size and shape of the assembling complex. physical principles inform connectivity and excluded volume.\n", + "\n", + "# Snapshot modeling step 2: representation, scoring function, and search process\n", + "\n", + "Next, we represent, score and search for snapshot models. This step is quite computationally expensive. Therefore, we will not run the modeling protocol in this notebook, though the scripts are available in `modeling/Snapshots/Snapshots_Modeling/`. Here, we will simply describe the important steps made by two scripts. The first, `static_snapshot.py`, uses IMP to represent, score, and search for a single static snapshot model. The second, `start_sim.py`, automates the creation of a snapshot model for each heterogeneity model.\n", + "\n", + "## Modeling one snapshot", + "\n", + "Here, we will describe the process of modeling a single snapshot model, as performed by running `static_snapshot.py`.\n", + "\n", + "### Representing the model\n", + "\n", + "We begin by representing the data and the model. In general, the *representation* of a system is defined by all the variables that need to be determined.\n", + "\n", + "For our model of a protein complex, we use a combination of two representations. The first is a series of *spherical beads*, which can correspond to portions of the biomolecules of interest, such as atoms or groups of atoms. The second is a series of *3D Gaussians*, which help calculate the overlap between our model and the density from ET data.\n", + "\n", + "Beads and Gaussians in our model belong to either a *rigid body* or *flexible string*. The positions of all beads and Gaussians in a single rigid body are constrained during sampling and do not move relative to each other. Meanwhile, flexible beads can move freely during sampling, but are restrained by sequence connectivity.\n", + "\n", + "To begin, we built a topology file with the representation for the model of the complete system, `spatiotemporal_topology.txt`, located in `Heterogeneity/Heterogeneity_Modeling/`. This complete topology was used as a template to build topologies of each heterogeneity model. 
Based on our observation of the structure of the complex, we chose to represent each protein with at least 2 separate rigid bodies, and left the first 28 residues of protein C as flexible beads. Rigid bodies were described with 1 bead for every residue, and 10 residues per Gaussian. Flexible beads were described with 1 bead for every residue and 1 residue per Gaussian. A more complete description of the options available in topology files is available in the the [TopologyReader](https://integrativemodeling.org/2.21.0/doc/ref/classIMP_1_1pmi_1_1topology_1_1TopologyReader.html) documentation.\n", + "\n", + "\\code{.txt}\n", + "|molecule_name | color | fasta_fn | fasta_id | pdb_fn | chain | residue_range | pdb_offset | bead_size | em_residues_per_gaussian | rigid_body | super_rigid_body | chain_of_super_rigid_bodies | \n", + "\n", + "|Ubi-E2-D3|blue|3rpg.fasta.txt|Ubi-E2-D3|3rpg.pdb|A|-1,18|2|1|10|1|1||\n", + "|Ubi-E2-D3|blue|3rpg.fasta.txt|Ubi-E2-D3|3rpg.pdb|A|19,147|2|1|10|2|1||\n", + "|BMI-1|red|3rpg.fasta.txt|BMI-1|3rpg.pdb|B|3,83|-2|1|10|3|2||\n", + "|BMI-1|red|3rpg.fasta.txt|BMI-1|3rpg.pdb|B|84,101|-2|1|10|4|2||\n", + "|E3-ubi-RING2|green|3rpg.fasta.txt|E3-ubi-RING2|BEADS|C|16,44|-15|1|1|5|3||\n", + "|E3-ubi-RING2|green|3rpg.fasta.txt|E3-ubi-RING2|3rpg.pdb|C|45,116|-15|1|10|6|3||\n", + "\\endcode\n", + "\n", + "Next, we must prepare `static_snapshot.py` to read in this topology file. We begin by defining the input variables, `state` and `time`, which define which topology to use, as well as the paths to other pieces of input information.\n", + "\n", + "\\code{.py}\n", + "### Running parameters to access correct path of ET_data for EM restraint", + "### and topology file for certain {state}_{time}_topol.txt", + "state = sys.argv[1]\n", + "time = sys.argv[2]\n", + "\n", + "### Topology file", + "topology_file = f\"../{state}_{time}_topol.txt\"\n", + "### Paths to input data for topology file", + "pdb_dir = \"../../../../Input_Information/PDB\"\n", + "fasta_dir = \"../../../../Input_Information/FASTA\"\n", + "### Path where forward gmms are created with BuildSystem (based ont topology file)", + "### If gmms exist, they will be used from this folder", + "forward_gmm_dir = \"../forward_densities/\"\n", + "### Path to experimental gmms", + "exp_gmm_dir= '../../../../Input_Information/ET_data/add_noise'\n", + "\\endcode\n", + "\n", + "Next, we build the system, using the topology tile, described above.\n", + "\\code{.py}\n", + "### Create a system from a topology file. Resolution is set on 1.", + "bs = IMP.pmi.macros.BuildSystem(mdl, resolutions= 1, name= f'Static_snapshots_{state}_{time}')\n", + "bs.add_state(t)\n", + "\\endcode\n", + "\n", + "Then, we prepare for later sampling steps by setting which Monte Carlo moves will be performed. Rotation (`rot`) and translation (`trans`) parameters are separately set for super rigid bodies (`srb`), rigid bodies (`rb`), and beads (`bead`).\n", + "\\code{.py}\n", + "### Macro execution: It gives hierarchy and degrees of freedom (dof).", + "### In dof we define how much can each (super) rigid body translate and rotate between two adjacent Monte Carlo steps", + "root_hier, dof = bs.execute_macro(max_rb_trans=1.0,\n", + " max_rb_rot=0.5, max_bead_trans=2.0,\n", + " max_srb_trans=1.0, max_srb_rot=0.5)\n", + "\\endcode\n", + "\n", + "### Scoring the model\n", + "\n", + "After building the model representation, we choose a scoring function to score the model based on input information. 
This scoring function is represented as a series of restraints that serve as priors.\n", + "\n", + "#### Connectivity", + "\n", + "We begin with a connectivity restraint, which restrains beads adjacent in sequence to be close in 3D space.\n", + "\n", + "\\code{.py}\n", + "#### Adding Restraints", + "#### Empty list where the data from restraints should be collected", + "output_objects=[]\n", + "\n", + "#### Two common restraints: ConnectivityRestraint and ExcludedVolumeSphere", + "#### ConnectivityRestraint is added for each \"molecule\" separately", + "for m in root_hier.get_children()[0].get_children():\n", + " cr = IMP.pmi.restraints.stereochemistry.ConnectivityRestraint(m)\n", + " cr.add_to_model()\n", + " output_objects.append(cr)\n", + "\\endcode\n", + "\n", + "#### Excluded volume", + "\n", + "Next is an excluded volume restraint, which restrains beads to minimize their spatial overlap.\n", + "\n", + "\\code{.py}\n", + "#### Add excluded volume", + "evr = IMP.pmi.restraints.stereochemistry.ExcludedVolumeSphere(\n", + " included_objects=[root_hier],\n", + " resolution=1000)\n", + "output_objects.append(evr)\n", + "evr.add_to_model()\n", + "\\endcode\n", + "\n", + "#### Electron tomography", + "\n", + "Finally, we restrain our models based on their fit to ET density maps. Both the experimental map and the forward protein density are represented as Gaussian mixture models (GMMs) to speed up scoring. The score is based on the log of the correlation coefficient between the experimental density and the forward protein density.\n", + "\n", + "\\code{.py}\n", + "#### Applying time-dependent EM restraint. Point to correct gmm / mrc file at each time point", + "#### Path to corresponding .gmm file (and .mrc file)", + "em_map = exp_gmm_dir + f\"/{time}_noisy.gmm\"\n", + "\n", + "#### Create artificial densities from hierarchy", + "densities = IMP.atom.Selection(root_hier,\n", + " representation_type=IMP.atom.DENSITIES).get_selected_particles()\n", + "\n", + "#### Create EM restraint based on these densities", + "emr = IMP.pmi.restraints.em.GaussianEMRestraint(\n", + " densities,\n", + " target_fn=em_map,\n", + " slope=0.000001,\n", + " scale_target_to_mass=True,\n", + " weight=1000)\n", + "output_objects.append(emr)\n", + "emr.add_to_model()\n", + "\\endcode\n", + "\n", + "### Searching for good scoring models\n", + "\n", + "After building a scoring function that scores alternative models based on their fit to the input information, we aim to search for good scoring models. For complicated systems, stochastic sampling techniques such as Monte Carlo (MC) sampling are often the most efficient way to compute good scoring models. Here, we generate a random initial configuration and then perform temperature replica exchange MC sampling with 16 temperatures from different initial configurations. 
By performing multiple runs of replica exchange MC from different initial configurations, we can later ensure that our sampling is sufficiently converged.\n", + "\n", + "\\code{.py}\n", + "### Generate random configuration", + "IMP.pmi.tools.shuffle_configuration(root_hier,\n", + " max_translation=50)\n", + "\n", + "### Perform replica exchange sampling", + "rex=IMP.pmi.macros.ReplicaExchange(mdl,\n", + " root_hier=root_hier,\n", + " monte_carlo_sample_objects=dof.get_movers(),\n", + " global_output_directory='output', # name 'output' is the best for imp sampcon select_good\n", + " output_objects=output_objects,\n", + " monte_carlo_steps=200, # Number of MC steps between writing frames.\n", + " number_of_best_scoring_models=0,\n", + " number_of_frames=500) # number of frames to be saved\n", + "### In our case, for each snapshot we generated 25000 frames altogether (50*500)", + "rex.execute_macro()\n", + "\\endcode\n", + "\n", + "After performing sampling, a variety of outputs will be created. These outputs include `.rmf` files, which contain multi-resolution models output by IMP, and `.out` files which contains a variety of information about the run such as the value of the restraints and the MC acceptance rate.\n", + "\n", + "## Generalizing modeling to all snapshots\n", + "\n", + "Next, we will describe the process of computing multiple static snapshot models, as performed by running `start_sim.py`.\n", + "\n", + "From heterogeneity modeling, we see that there are 3 heterogeneity models at each time point (it is possible to have more snapshot models than copy numbers if multiple copies of the protein exist in the complex), each of which has a corresponding topology file in `Heterogeneity/Heterogeneity_Modeling/`. We wrote a function, `generate_all_snapshots`, which creates a directory for each snapshot model, copies the python script and topology file into that directory, and submits a job script to run sampling. The job script will likely need to be customized for the user's computer or cluster.\n", + "\n", + "\\code{.py}\n", + "## 1a - parameters for generate_all_snapshots", + "## state_dict - universal parameter", + "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", + "\n", + "main_dir = os.getcwd()\n", + "topol_dir = os.path.join(os.getcwd(), '../../Heterogeneity/Heterogeneity_Modeling')\n", + "items_to_copy = ['static_snapshot.py'] # additionally we need to copy only specific topology file\n", + "## jobs script will likely depend on the user's cluster / configuration", + "job_template = (\"#!/bin/bash\\n#$ -S /bin/bash\\n#$ -cwd\\n#$ -r n\\n#$ -j y\\n#$ -N Tutorial\\n#$ -pe smp 16\\n\"\n", + " \"#$ -l h_rt=48:00:00\\n\\nmodule load Sali\\nmodule load imp\\nmodule load mpi/openmpi-x86_64\\n\\n\"\n", + " \"mpirun -np $NSLOTS python3 static_snapshot.py {state} {time}\")\n", + "number_of_runs = 50\n", + "\n", + "## 1b - calling generate_all_snapshots", + "generate_all_snapshots(state_dict, main_dir, topol_dir, items_to_copy, job_template, number_of_runs)\n", + "\n", + "\\endcode\n", + "\n", + "# Snapshot modeling step 3: assessment\n", + "\n", + "The above code would variety of alternative snapshot models. In general, we would like to assess these models in at least 4 ways: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model. 
In this portion of the tutorial, we focus specifically on estimating the sampling precision of the model, while quantitative comparisons between the model and experimental data will be reserved for the final step, when we assess trajectories. Again, this assessment process is quite computationally intensive, so, instead of running the script explicitly, we will walk you through the `snapshot_assessment.py` script, which is located in the `modeling/Snapshots/Snapshots_Assessment` folder.\n", + "\n", + "## Filtering good scoring models\n", + "\n", + "Initially, we want to filter the various alternative structural models to only select those that meet certain parameter thresholds. In this case, we filter the structural models comprising each snapshot model by the median cross correlation with EM data. We note that this filtering criteria is subjective, and developing a Bayesian method to objectively weigh different restraints for filtering remains an interesting future development in integrative modeling.\n", + "\n", + "The current filtering procedure involves three steps. In the first step, we look through the `stat.*.out` files to write out the cross correlation with EM data for each model, which, in this case, is labeled column `3`, `GaussianEMRestraint_None_CCC`. In other applications, the column that corresponds to each type of experimental data may change, depending on the scoring terms for each model. For each snapshot model, a new file is written with this data (`{state}_{time}_stat.txt`).\n", + "\n", + "\\code{.py}\n", + "## state_dict - universal parameter", + "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", + "## current directory", + "main_dir = os.getcwd()\n", + "\n", + "## 1 calling extracting_stat_files function and related parameters", + "keys_to_extract = [3]\n", + "runs_nr = 50\n", + "replica_nr = 16\n", + "replica_output_name = 'output'\n", + "decimals_nr = 16\n", + "\n", + "extracting_stat_files(state_dict, runs_nr, replica_nr, replica_output_name, keys_to_extract, decimals_nr)\n", + "print(\"extracting_stat_files is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "In the second step, we want to determine the median value of EM cross correlation for each snapshot model. We wrote `general_rule_calculation` to look through the `general_rule_column` for each `{state}_{time}_stat.txt` file and determine both the median value and the number of structures generated.\n", + "\n", + "\\code{.py}\n", + "## 2 calling general_rule_calculation and related parameters", + "general_rule_column = '3'\n", + "\n", + "general_rule_calculation(state_dict, general_rule_column)\n", + "\n", + "print(\"general_rule_calculation is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "In the third step, we use the `imp_sampcon select_good` tool to filter each snapshot model, according to the median value determined in the previous step. For each snapshot model, this function produces a file, `good_scoring_models/model_ids_scores.txt`, which contains the run, replicaID, scores, and sampleID for each model that passes filtering. 
The `imp_sampcon select_good` tool also saves RMF files with each model from two independent groups of sampling runs from each snapshot model to `good_scoring_models/sample_A` and `good_scoring_models/sample_B`, writes the scores for the two independent groups of sampling runs to `good_scoring_models/scoresA.txt` and `good_scoring_models/scoresB.txt`, and writes `good_scoring_models/model_sample_ids.txt` to connect each model to its division of sampling runs. More information on `imp_sampcon` is available in the analysis portion of the [actin tutorial](https://integrativemodeling.org/tutorials/actin/analysis.html).\n", + "\n", + "\\code{.py}\n", + "## 3 calling general_rule_filter_independent_samples", + "general_rule_filter_independent_samples(state_dict, main_dir)\n", + "print(\"general_rule_filter_independent_samples is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "## Plotting data, clustering models, and determining sampling precision\n", + "\n", + "Next, scores can be plotted for analysis. Here, we wrote the `create_histograms` function to run `imp_sampcon plot_score` so that it plots distributions for various scores of interest. Each of these plots is saved to `histograms{state}_{time}/{score}.png`, where `{score}` is an entry in `score_list`. These plots are useful for debugging the modeling protocol, and should appear roughly Gaussian.\n", + "\n", + "\\code{.py}\n", + "## 4 calling create_histograms and related parameters", + "score_list = [\n", + " 'Total_Score',\n", + " 'ConnectivityRestraint_Score',\n", + " 'ExcludedVolumeSphere_Score',\n", + " 'GaussianEMRestraint_None',\n", + " 'GaussianEMRestraint_None_CCC'\n", + "] # list of histograms we want to create in each histograms{state}_{time} directory\n", + "\n", + "create_histograms(state_dict, main_dir, score_list)\n", + "print(\"create_histograms is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "We then check the number of models in each sampling run through our function, `count_rows_and_generate_report`, which writes the `independent_samples_stat.txt` file. Empirically, we have found that ensuring the overall number of models in each independent sample after filtering is roughly equal serves as a good first check on sampling convergence.\n", + "\n", + "\\code{.py}\n", + "## 5 calling count_rows_and_generate_report", + "count_rows_and_generate_report(state_dict)\n", + "print(\"count_rows_and_generate_report is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "Next, we write the density range dictionaries, which are output as `{state}_{time}_density_ranges.txt`. These dictionaries label each protein in each snapshot model, which will be passed into `imp_sampcon` to calculate the localization density of each protein.\n", + "\n", + "\\code{.py}\n", + "## 6 calling create_density_dictionary:", + "create_density_dictionary_files(state_dict, main_dir)\n", + "print(\"create_density_dictionary is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "Next, we run `imp_sampcon exhaust` on each snapshot model. This code performs checks on the exhaustiveness of the sampling. Specifically, it analyzes the convergence of the model score, whether the two model sets were drawn from the same distribution, and whether each structural cluster includes models from each sample proportionally to its size. 
The output for each snapshot model is written out to the `exhaust_{state}_{time}` folder.\n", + "\n", + "\\code{.py}\n", + "## 7 calling exhaust", + "exhaust(state_dict, main_dir)\n", + "print(\"exhaust is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "Plots for determining the sampling precision are shown below for a single snapshot model, 1_2min. (a) Tests the convergence of the lowest scoring model (`snapshot_{state}_{time}.Top_Score_Conv.pdf`). Error bars represent standard deviations of the best scores, estimated by selecting different subsets of models 10 times. The light-blue line indicates a lower bound reference on the total score. (b) Tests that the scores of two independently sampled models come from the same distribution (`snapshot_{state}_{time}.Score_Dist.pdf`). The difference between the two distributions, as measured by the KS test statistic (D) and KS test p-value (p) indicates that the difference is both statistically insignificant (p>0.05) and small in magnitude (D<0.3). (c) Determines the structural precision of a snapshot model (`snapshot_{state}_{time}.ChiSquare.pdf`). RMSD clustering is performed at 1 \u00c5 intervals until the clustered population (% clustered) is greater than 80%, and either the \u03c72 p-value is greater than 0.05 or Cramer\u2019s V is less than 0.1. The sampling precision is indicated by the dashed black line. (d) Populations from sample 1 and sample 2 are shown for each cluster (`snapshot_{state}_{time}.Cluster_Population.pdf`).\n", + "\n", + "\n", + "\n", + "Further structural analysis can be calculated by using the `cluster.*` files. The `cluster.*.{sample}.txt` files contain the model number for the models in that cluster, where `{sample}` indicates which round of sampling the models came from. The `cluster.*` folder contains an RMF for centroid model of that cluster, along with the localization densities for each protein. The localization densities of each protein from each independent sampling can be compared to ensure independent samplings produce the same results.\n", + "\n", + "Ideally, each of these plots should be checked for each snapshot model. As a way to summarize the output of these checks, we can gather the results of the KS test and the sampling precision test for all snapshot models. 
This is done by running `extract_exhaust_data` and `save_exhaust_data_as_png`, which write `KS_sampling_precision_output.txt` and `KS_sampling_precision_output.png`, respectively.\n", + "\n", + "\\code{.py}\n", + "## 8 calling extract_exhaust_data", + "extract_exhaust_data(state_dict)\n", + "print(\"extract_exhaust_data is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\n", + "## 9 calling save_exhaust_data_as_png", + "save_exhaust_data_as_png()\n", + "print(\"save_exhaust_data_as_png is DONE\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "These functions write a table that includes the KS two-sample test statistic (D), the KS test p-value, and the sampling precision for each snapshot model, which is replotted below.\n", + "\n", + "\n", + "\n", + "## Visualizing models\n", + "\n", + "The resulting RMF files and localization densities from this analysis can be viewed in [UCSF Chimera](https://www.rbvi.ucsf.edu/chimera/) (version>=1.13) or [UCSF ChimeraX](https://www.cgl.ucsf.edu/chimerax/).\n", + "\n", + "Here, we plotted each centroid model (A - blue, B - orange, and C - purple) from the most populated cluster for each snapshot model and compared that model to the experimental EM profile (gray).\n", + "\n", + "\n", + "\n", + "Finally, now that the snapshot models have been assessed, we can perform modeling of a trajectory." + ] + }, + { + "cell_type": "markdown", + "id": "6fc2546f-7c6f-4146-8c9b-ee143fcead6e", + "metadata": {}, + "source": [ + "Modeling of a Trajectory\n", + "====================================\n", + "\n", + "Here, we describe the final modeling problem in our composite workflow, how to build trajectory models using IMP.\n", + "\n", + "# Trajectory modeling step 1: gathering of information\n", + "\n", + "We begin trajectory modeling with the first step of integrative modeling, gathering information. Trajectory modeling utilizes dynamic information about the biomolecular process. In this case, we utilize heterogeneity models, snapshot models, physical theories, and synthetically generated small-angle X-ray scattering (SAXS) profiles.\n", + "\n", + "\n", + "\n", + "Heterogeneity models inform the possible compositional states at each time point and measure how well a compositional state agrees with input information. Snapshot models provide structural models for each heterogeneity model and measure how well those structural models agree with input information about their structure. Physical theories of macromolecular dynamics inform transitions between states. SAXS data informs the size and shape of the assembling complex and is left for validation.\n", + "\n", + "# Trajectory modeling step 2: representation, scoring function, and search process\n", + "\n", + "Trajectory modeling connects alternative snapshot models at adjacent time points, followed by scoring the trajectory models based on their fit to the input information, as described in full [here](https://www.biorxiv.org/content/10.1101/2024.08.06.606842v1.abstract).\n", + "\n", + "## Background behind integrative spatiotemporal modeling", + "\n", + "### Representing the model\n", + "\n", + "We choose to represent dynamic processes as a trajectory of snapshot models, with one snapshot model at each time point. In this case, we computed snapshot models at 3 time points (0, 1, and 2 minutes), so a single trajectory model will consist of 3 snapshot models, one at each of 0, 1, and 2 minutes. 
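For instance, with 3, 3, and 1 snapshot models at these time points, there are at most 3 x 3 x 1 = 9 candidate trajectories before any transitions are disallowed. The snippet below is only an illustrative sketch of this enumeration; the actual enumeration and scoring are performed by `spatiotemporal.create_DAG` later in this tutorial:\n", + "\n", + "\\code{.py}\n", + "import itertools\n", + "\n", + "# Snapshot models available at each time point, labeled with the\n", + "# {state}_{time} convention used throughout this tutorial\n", + "snapshots = {'0min': ['1_0min', '2_0min', '3_0min'],\n", + "             '1min': ['1_1min', '2_1min', '3_1min'],\n", + "             '2min': ['1_2min']}\n", + "\n", + "# A candidate trajectory is one snapshot model per time point, in temporal order\n", + "trajectories = list(itertools.product(*snapshots.values()))\n", + "print(len(trajectories))  # 3 * 3 * 1 = 9 candidate trajectories\n", + "print(trajectories[0])    # e.g. ('1_0min', '1_1min', '1_2min')\n", + "\\endcode\n", + "\n", + "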
The modeling procedure described here will produce a set of scored trajectory models, which can be displayed as a directed acyclic graph, where nodes in the graph represent the snapshot model and edges represent connections between snapshot models at neighboring time points.\n", + "\n", + "### Scoring the model\n", + "\n", + "To score trajectory models, we incorporate both the scores of individual snapshot models and the scores of transitions between them. Under the assumption that the process is Markovian (*i.e.* memoryless), the weight of a trajectory model takes the form:\n", + "\n", + "$$\n", + "W(\\chi) \\propto \\displaystyle\\prod^{T}_{t=0} P( X_{N,t}, N_{t} | D_{t}) \\cdot \\displaystyle\\prod^{T-1}_{t=0} W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1}),\n", + "$$\n", + "\n", + "where $t$ indexes times from 0 until the final modeled snapshot ($T$); $P(X_{N,t}, N_{t} | D_{t})$ is the snapshot model score; and $W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1})$ is the transition score. Trajectory model weights ($W(\chi)$) are normalized so that the sum of all trajectory models' weights is 1.0. Transition scores are currently based on a simple metric that either allows or disallows a transition. Transitions are only allowed if all proteins in the first snapshot model are included in the second snapshot model. In the future, we hope to include more detailed transition scoring terms, which may take into account experimental information or physical models of macromolecular dynamics.\n", + "\n", + "### Searching for good scoring models\n", + "\n", + "Trajectory models are constructed by enumerating all connections between adjacent snapshot models and scoring these trajectory models according to the equation above. This procedure results in a set of weighted trajectory models.\n", + "\n", + "## Computing trajectory models", + "\n", + "To compute trajectory models, we first copy all necessary files to a new directory, `data`. These files are (i) `{state}_{time}.config` files, which include the subcomplexes that are in each state, (ii) `{state}_{time}_scores.log`, which is a list of all scores of all structural models in that snapshot model, and (iii) `exp_comp{prot}.csv`, which is the experimental copy number for each protein (`{prot}`) as a function of time. 
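Once these files have been copied (as done in the next cell), a quick check that `./data/` contains everything expected can save debugging time later; the snippet below is only a sketch, assuming the `state_dict` and protein names used in this tutorial:\n", + "\n", + "\\code{.py}\n", + "import os\n", + "\n", + "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", + "missing = []\n", + "for time, n_states in state_dict.items():\n", + "    for state in range(1, n_states + 1):\n", + "        for suffix in ('.config', '_scores.log'):\n", + "            fn = os.path.join('./data/', f'{state}_{time}{suffix}')\n", + "            if not os.path.exists(fn):\n", + "                missing.append(fn)\n", + "for prot in ('A', 'B', 'C'):\n", + "    fn = os.path.join('./data/', f'exp_comp{prot}.csv')\n", + "    if not os.path.exists(fn):\n", + "        missing.append(fn)\n", + "print('missing files:', missing if missing else 'none')\n", + "\\endcode\n", + "\n", + "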
Here, we copy files related to the snapshots (`*.log` files) from the `modeling` directory, as we skipped computing snapshots due to the computational expense.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bb887efe-0630-47b6-9bf1-85fa216a6816", + "metadata": {}, + "outputs": [], + "source": [ + "def merge_scores(fileA, fileB, outputFile):\n", + " \"\"\"\n", + " For each function merges scoresA.txt and scoresB.txt into {state}_{time}_scores.log\n", + "\n", + " :param fileA: path to scoresA.txt\n", + " :param fileB: path to scoresB.txt\n", + " :param outputFile: path to output merged .log file named {state}_{time}_scores.log for each snapshot.\n", + " This type of .log file is used in crete_DAG to generate trajectory model.\n", + " \"\"\"\n", + " # open both files, so data can be extracted\n", + " with open(fileA, 'r') as file_a:\n", + " data_a = file_a.readlines()\n", + "\n", + " with open(fileB, 'r') as file_b:\n", + " data_b = file_b.readlines()\n", + "\n", + " # Merge the content of both files\n", + " merged_data = data_a + data_b\n", + "\n", + " # Write the merged content into the output file\n", + " with open(outputFile, 'w') as output:\n", + " output.writelines(merged_data)\n", + "\n", + "def create_data_and_copy_files(state_dict, custom_source_dir1 = None, custom_source_dir2 = None, custom_source_dir3 = None):\n", + " \"\"\"\n", + " Copies three types of files important to generate trajectory models:\n", + " -.config files created with start_sim.py in Snapshot_Modeling (source_dir1)\n", + " -time-dependent stoichiometry data for each timepoint. Data should be presented in .csv file. With this function all\n", + " csv file in source_dir2 will be copied. These .csv files will be used in the exp_comp dictionary in create_DAG\n", + " function\n", + " -scoresA and scoresB for each snapshot created with imp sampcon exhaust\n", + " (source_dir1 + snapshot + good_scoring_models) are merged into total score .txt using merge_scores helper function.\n", + " All copied files are gathered in newly created './data/' directory, where everything is prepared for create_DAG\n", + " function.\n", + "\n", + "\n", + " :param state_dict (dict): state_dict: dictionary that defines the spatiotemporal model.\n", + " The keys are strings that correspond to each time point in the\n", + " stepwise temporal process. Keys should be ordered according to the\n", + " steps in the spatiotemporal process. The values are integers that\n", + " correspond to the number of possible states at that timepoint.\n", + " :param custom_source_dir1 (optional - str): Custom path to heterogeneity modeling dir (heterogeneity_modeling.py),\n", + " to copy .config files\n", + " :param custom_source_dir2 (optional - str): Custom path to stoichiometry data dir\n", + " :param custom_source_dir3 (optional - str): Custom path to snapshot modeling dir (start_sim.py), to copy .config\n", + " files and to access scoresA/scoresB (custom_source_dir3 + snapshot{state}_{time} + 'good_scoring_models')\n", + " \"\"\"\n", + "\n", + " # Create the destination directory if it does not exist (./data/). 
Here all the\n", + " destination_dir = './data/'\n", + " os.makedirs(destination_dir, exist_ok=True)\n", + "\n", + " # Path to heterogeneity modeling dir\n", + " if custom_source_dir1:\n", + " source_dir1 = custom_source_dir1\n", + " else:\n", + " source_dir1 = '../../Heterogeneity/Heterogeneity_Modeling/'\n", + "\n", + " # Path to stoichiometry data dir\n", + " if custom_source_dir2:\n", + " source_dir2 = custom_source_dir2\n", + " else:\n", + " source_dir2 = '../../Input_Information/gen_FCS/'\n", + "\n", + " # Path to snapshot modeling dir\n", + " if custom_source_dir3:\n", + " source_dir3 = custom_source_dir3\n", + " else:\n", + " source_dir3 = '../../Snapshots/Snapshots_Modeling/'\n", + "\n", + " # Copy all .config files from the first source directory to the destination directory\n", + " try:\n", + " for file_name in os.listdir(source_dir1):\n", + " if file_name.endswith('.config'):\n", + " full_file_name = os.path.join(source_dir1, file_name)\n", + " if os.path.isfile(full_file_name):\n", + " shutil.copy(full_file_name, destination_dir)\n", + " print(\".config files are copied\")\n", + " except Exception as e:\n", + " print(f\".config files cannot be copied. Try do do it manually. Reason for Error: {e}\")\n", + "\n", + " # Copy all .csv stoichiometry files from the second source directory to the destination directory\n", + " try:\n", + " for file_name in os.listdir(source_dir2):\n", + " if file_name.endswith('.csv'):\n", + " full_file_name = os.path.join(source_dir2, file_name)\n", + " if os.path.isfile(full_file_name):\n", + " shutil.copy(full_file_name, destination_dir)\n", + " print(\".csv stoichiometry files are copied\")\n", + " except Exception as e:\n", + " print(f\".csv stoichiometry files cannot be copied. Try do do it manually. Reason for Error: {e}\")\n", + "\n", + " # Copy scoresA and scoresB from the snapshot_{state}_{time} directories and first source directory path\n", + " for time in state_dict.keys():\n", + " for state in range(1, state_dict[time] + 1):\n", + " dir_name = f\"snapshot{state}_{time}\"\n", + " good_scoring_path = \"good_scoring_models\"\n", + " file_a = os.path.join(source_dir3, dir_name, good_scoring_path, \"scoresA.txt\")\n", + " file_b = os.path.join(source_dir3, dir_name, good_scoring_path, \"scoresB.txt\")\n", + " output_file = os.path.join(destination_dir, f\"{state}_{time}_scores.log\") # name of the output file\n", + "\n", + " try:\n", + " # Ensure the directory exists before try to read/write files\n", + " if os.path.exists(file_a) and os.path.exists(file_b):\n", + " merge_scores(file_a, file_b, output_file) # call helper function to merge files\n", + " print(f\"Scores for snapshot{state}_{time} have been merged and saved\")\n", + " else: # many things can go wrong here, so it is good to know where is the problem\n", + " print(f\"Path doesn't exist: {source_dir3}\")\n", + " print(f\"Files not found in directory: {dir_name}\")\n", + " print(f\"Files not found in directory: {file_a}\")\n", + " print(f\"Files not found in directory: {file_b}\")\n", + " print(f\"Output directory doesn't exist: {destination_dir}\")\n", + " except Exception as e:\n", + " print(f\"total scores files cannot be copied of merged. 
Reason for Error: {e}\")\n", + "\n", + "# copy all the relevant files for create_DAG\n", + "# it is important that everything starts from main dir\n", + "main_dir = os.getcwd()\n", + "os.chdir(main_dir)\n", + "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", + "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../modeling/Input_Information/gen_FCS/', custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", + "\n", + "# then trajectory model is created based on the all copied data\n", + "expected_subcomplexes = ['A', 'B', 'C']\n", + "exp_comp = {'A': 'exp_compA.csv', 'B': 'exp_compB.csv', 'C': 'exp_compC.csv'}\n", + "input = './data/'\n", + "output = \"../output/\"" + ] + }, + { + "cell_type": "markdown", + "id": "7a4053cf-f2a3-4836-89f3-a8b2098109f2", + "metadata": {}, + "source": [ + "Next, we compute the spatiotemporal model. The inputs we included are:\n", + "- state_dict (dict): a dictionary that defines the spatiotemporal model. Keys are strings for each time point in the spatiotemporal process and values are integers corresponding to the number of snapshot models computed at that time point\n", + "- out_pdf (bool): whether to write the probability distribution function (pdf).\n", + "- npaths (int): Number of states two write to a file (path*.txt).\n", + "- input_dir (str): directory with the input information.\n", + "- scorestr (str): final characters at the end of the score files.\n", + "- output_dir (str): directory to which model will be written. Will be created if it does not exist.\n", + "- spatio_temporal_rule (bool): whether to include our transition scoring term, which enforces that all proteins in the first snapshot model are included in the second snapshot model.\n", + "- expected_subcomplexes (list): list of string objects, which is the subcomplexes to look when enforcing the spatiotemporal rule. Strings should be substrings of those in `{state}_{time}.config` files.\n", + "- score_comp (bool): whether to score the composition of each snapshot model.\n", + "- exp_comp_map (dictionary): key is a string with the name of each protein that will undergo composition scoring, value is the `.csv` file with the copy number data for that protein.\n", + "- draw_dag (bool): whether to write out an image of the directed acyclic graph." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "819bb205-fa52-42f9-a4ab-d2f7c3cff0ad", + "metadata": {}, + "outputs": [], + "source": [ + "nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", + " input_dir=input, scorestr='_scores.log',\n", + " output_dir=output, spatio_temporal_rule=True,\n", + " expected_subcomplexes=expected_subcomplexes,\n", + " score_comp=True, exp_comp_map=exp_comp,\n", + " draw_dag=True)" + ] + }, + { + "cell_type": "markdown", + "id": "7184fcec-05b9-4348-a9b8-d3673db00fe5", + "metadata": {}, + "source": [ + "After running `spatiotemporal.create_DAG`, a variety of outputs are written:\n", + "- `cdf.txt`: the cumulative distribution function for the set of trajectory models.\n", + "- `pdf.txt`: the probability distribution function for the set of trajectory models.\n", + "- `labeled_pdf.txt`: Each row has 2 columns and represents a different trajectory model. The first column labels a single trajectory model as a series of snapshot models, where each snapshot model is written as `{state}_{time}|` in sequential order. 
The second column is the probability distribution function corresponding to that trajectory model.\n", + "- `dag_heatmap.eps` and `dag_heatmap`: image of the directed acyclic graph from the set of models.\n", + "- `path*.txt`: files where each row includes a `{state}_{time}` string, so that rows correspond to the states visited over that trajectory model. Files are numbered from the most likely path to the least likely path.\n", + "\n", + "Now that we have a trajectory model, we can plot the directed acyclic graph (left) and the series of centroid models from each snapshot model along the most likely trajectory model (right). Each row corresponds to a different time point in the assembly process (0 min, 1 min, and 2 min). Each node is shaded according to its weight in the final model ($W(X_{N,t}N_{t})$). Proteins are colored as A - blue, B - orange, and C - purple.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "fb4c016e-e5ba-4e3b-a76e-499750abb782", + "metadata": {}, + "source": [ + "\\image html Spatiotemporal_Model.png width=600px\n", + "\n", + "# Trajectory modeling step 3: assessment\n", + "\n", + "Now that the set of spatiotemporal models has been constructed, we must evaluate these models. We can evaluate these models in at least 4 ways: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model.\n", + "\n", + "## Sampling precision\n", + "\n", + "To begin, we calculate the sampling precision of the models. The sampling precision is calculated by using `spatiotemporal.create_DAG` to reconstruct the set of trajectory models using 2 independent sets of samplings for snapshot models. Then, the overlap between these snapshot models is evaluated using `analysis.temporal_precision`, which takes in two `labeled_pdf` files.\n", + "\n", + "The temporal precision can take values between 1.0 and 0.0, and indicates the overlap between the two models in trajectory space. Hence, values close to 1.0 indicate a high sampling precision, while values close to 0.0 indicate a low sampling precision. Here, the value close to 1.0 indicates that sampling does not affect the weights of the trajectory models.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f1436e33-81b6-400c-bf9c-7b667455265a", + "metadata": {}, + "outputs": [], + "source": [ + "## 1 - calculation of temporal precision\n", + "\n", + "# 1 - copy_files_for_data (copy all relevant files into 'data' directory)\n", + "def copy_files_for_data(state_dict, custom_source_dir1 = None, custom_source_dir2 = None, custom_source_dir3 = None):\n", + " \"\"\"\n", + " Copies three types of files important to generate trajectory models:\n", + " -.config files created with start_sim.py in Snapshot_Modeling (source_dir1)\n", + " -time-dependent stoichiometry data for each timepoint. Data should be presented in .csv file. With this function all\n", + " csv file in source_dir2 will be copied. 
These .csv files will be used in the exp_comp dictionary in create_DAG\n", + " function\n", + " -scoresA and scoresB for each snapshot created with imp sampcon exhaust\n", + " (source_dir1 + snapshot + good_scoring_models) are merged into total score .txt using merge_scores helper function.\n", + " All copied files are gathered in newly created './data/' directory, where everything is prepared for create_DAG\n", + " function.\n", + "\n", + "\n", + " :param state_dict (dict): state_dict: dictionary that defines the spatiotemporal model.\n", + " The keys are strings that correspond to each time point in the\n", + " stepwise temporal process. Keys should be ordered according to the\n", + " steps in the spatiotemporal process. The values are integers that\n", + " correspond to the number of possible states at that timepoint.\n", + " :param custom_source_dir1 (optional - str): Custom path to heterogeneity modeling dir (heterogeneity_modeling.py),\n", + " to copy .config files\n", + " :param custom_source_dir2 (optional - str): Custom path to stoichiometry data dir\n", + " :param custom_source_dir3 (optional - str): Custom path to snapshot modeling dir (start_sim.py), to copy .config\n", + " files and to access scoresA/scoresB (custom_source_dir3 + snapshot{state}_{time} + 'good_scoring_models')\n", + " \"\"\"\n", + " # Create the destination directory for all the data copied in this function\n", + " destination_dir = './data/'\n", + " os.makedirs(destination_dir, exist_ok=True)\n", + "\n", + " # path to heterogeneity modeling dir\n", + " if custom_source_dir1:\n", + " source_dir1 = custom_source_dir1\n", + " else:\n", + " source_dir1 = '../../Heterogeneity/Heterogeneity_Modeling/'\n", + "\n", + " # path to stoichiometry data dir\n", + " if custom_source_dir2:\n", + " source_dir2 = custom_source_dir2\n", + " else:\n", + " source_dir2 = '../../Input_Information/gen_FCS/'\n", + "\n", + " # path to snapshot modeling dir\n", + " if custom_source_dir3:\n", + " source_dir3 = custom_source_dir3\n", + " else:\n", + " source_dir3 = '../../Snapshots/Snapshots_Modeling/'\n", + "\n", + " # Copy all .config files from the first source directory to the destination directory\n", + " try:\n", + " for file_name in os.listdir(source_dir1):\n", + " if file_name.endswith('.config'):\n", + " full_file_name = os.path.join(source_dir1, file_name)\n", + " if os.path.isfile(full_file_name):\n", + " shutil.copy(full_file_name, destination_dir)\n", + " print(\".config files are copied\")\n", + " except Exception as e:\n", + " print(f\".config files cannot be copied. Try to do it manually. Reason for Error: {e}\")\n", + "\n", + " # Copy all .csv stoichiometry files from the second source directory to the destination directory\n", + " try:\n", + " for file_name in os.listdir(source_dir2):\n", + " if file_name.endswith('.csv'):\n", + " full_file_name = os.path.join(source_dir2, file_name)\n", + " if os.path.isfile(full_file_name):\n", + " shutil.copy(full_file_name, destination_dir)\n", + " print(\".csv stoichiometry files are copied\")\n", + " except Exception as e:\n", + " print(f\".csv stoichiometry files cannot be copied. Try to do it manually. 
Reason for Error: {e}\")\n", + "\n", + " # Copy scoresA and scoresB from the snapshot_{state}_{time} directories and first source directory path\n", + " try:\n", + " for time in state_dict.keys():\n", + " for state in range(1, state_dict[time] + 1):\n", + " snapshot_dir = os.path.join(source_dir3, f'snapshot{state}_{time}')\n", + " good_scoring_models_dir = os.path.join(snapshot_dir, 'good_scoring_models')\n", + " if os.path.isdir(good_scoring_models_dir):\n", + " for score_file in ['scoresA.txt', 'scoresB.txt']:\n", + " full_file_name = os.path.join(good_scoring_models_dir, score_file)\n", + " if os.path.isfile(full_file_name):\n", + " new_file_name = f'{state}_{time}_{os.path.splitext(score_file)[0]}.log'\n", + " shutil.copy(full_file_name, os.path.join(destination_dir, new_file_name))\n", + " print(f\"Copied {full_file_name} to {os.path.join(destination_dir, new_file_name)}\")\n", + " except Exception as e:\n", + " print(f\"scoresA.txt and scoresB.txt cannot be copied. Try do do it manually. Reason for Error: {e}\")\n", + "\n", + "os.chdir(main_dir)\n", + "# copy all the relevant files\n", + "copy_files_for_data(state_dict, custom_source_dir1='../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", + " custom_source_dir2='../modeling/Input_Information/gen_FCS/',\n", + " custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", + "\n", + "# create two independent DAGs\n", + "expected_subcomplexes = ['A', 'B', 'C']\n", + "exp_comp = {'A': 'exp_compA.csv', 'B': 'exp_compB.csv', 'C': 'exp_compC.csv'}\n", + "input = \"./data/\"\n", + "outputA = \"../output_modelA/\"\n", + "outputB = \"../output_modelB/\"\n", + "\n", + "# Output from sampling precision and model precision to be saved in united dir: analysis_output_precision\n", + "analysis_output = \"./analysis_output_precision/\"\n", + "os.makedirs(analysis_output, exist_ok=True)\n", + "\n", + "nodesA, graphA, graph_probA, graph_scoresA = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", + " input_dir=input, scorestr='_scoresA.log',\n", + " output_dir=outputA,\n", + " spatio_temporal_rule=True,\n", + " expected_subcomplexes=expected_subcomplexes,\n", + " score_comp=True, exp_comp_map=exp_comp,\n", + " draw_dag=False)\n", + "\n", + "os.chdir(main_dir)\n", + "nodesB, graphB, graph_probB, graph_scoresB = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", + " input_dir=input, scorestr='_scoresB.log',\n", + " output_dir=outputB,\n", + " spatio_temporal_rule=True,\n", + " expected_subcomplexes=expected_subcomplexes,\n", + " score_comp=True, exp_comp_map=exp_comp,\n", + " draw_dag=False)\n", + "\n", + "## 1 - analysis\n", + "analysis.temporal_precision(outputA + 'labeled_pdf.txt', outputB + 'labeled_pdf.txt',\n", + " output_fn='.' + analysis_output + 'temporal_precision.txt')\n", + "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", + "print(\"Step 1: calculation of temporal precision IS COMPLETED\")\n", + "print(\"\")\n", + "print(\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "6ce48a52-b06c-4e09-80a5-debe0fd2cba2", + "metadata": {}, + "source": [ + "## Model precision\n", + "\n", + "Next, we calculate the precision of the model, using `analysis.precision`. Here, the model precision calculates the number of trajectory models with high weights. The precision ranges from 1.0 to 1/d, where d is the number of trajectory models. 
Values approaching 1.0 indicate the model set can be described by a single trajectory model, while values close to 1/d indicate that all trajectory models have similar weights.\n", + "\n", + "The `analysis.precision` function reads in the `labeled_pdf` of the complete model, and calculates the precision of the model. The value close to 1.0 indicates that the set of models can be sufficiently represented by a single trajectory model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3669407-4ddc-4e1f-b8ba-c8638024cd56", + "metadata": {}, + "outputs": [], + "source": [ + "## 2 - calculation of precision of the model\n", + "\n", + "# precision is calculated from .labeled_pdf.txt in Trajectories_Modeling dir\n", + "trajectories_modeling_input_dir = \"./output/\"\n", + "\n", + "analysis.precision(trajectories_modeling_input_dir + 'labeled_pdf.txt', output_fn=analysis_output + 'precision.txt')\n", + "\n", + "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", + "print(\"Step 2: calculation of precision of the model IS COMPLETED\")\n", + "print(\"\")\n", + "print(\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "8a44c8b5-93d8-4ab8-b8cc-d6a70168c0eb", + "metadata": {}, + "source": [ + "## Comparison against data used in model construction\n", + "\n", + "We then evaluate the model against data used in model construction. First, we can calculate the cross-correlation between the original EM map and the forward density projected from each snapshot model. This calculation is too computationally expensive for this notebook, but can be found in `modeling/Trajectories/Trajectories_Assessment`, where we wrote the `ccEM` function to perform this comparison for all snapshot models.\n", + "\n", + "\\code{.py}\n", + "## 3a - comparison of the model to data used in modeling (EM)", + "exp_mrc_base_path = \"../../Input_Information/ET_data/add_noise\"\n", + "ccEM(exp_mrc_base_path)\n", + "print(\"Step 3a: ET validation IS COMPLETED\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "The results of this comparison are shown below." + ] + }, + { + "cell_type": "markdown", + "id": "70a6e0a0-98d5-4b63-a839-221a4fdc493b", + "metadata": {}, + "source": [ + "After comparing the model to EM data, we aimed to compare the model to copy number data, and wrote the `forward_model_copy_number` function to evaluate the copy numbers from our set of trajectory models. The output of `forward_model_copy_number` is written in `forward_model_copy_number/`. The folder contains `CN_prot_{prot}.txt` files for each protein, which have the mean and standard deviation of protein copy number at each time point. We can then plot these copy numbers from the forward models against those from the experiment, as shown below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3f7c451-2b04-4e8b-b39c-16c493e8a1a6", + "metadata": {}, + "outputs": [], + "source": [ + "def read_labeled_pdf(pdf_file):\n", + " \"\"\"\n", + " Function to read in a labeled probability distribution file output by spatiotemporal.create_DAG.\n", + " Used to determine protein copy numbers by forward_model_copy_number.\n", + " :param pdf_file (str): sting for the path of the labeled probability distribution file output by\n", + " spatiotemporal.create_DAG.\n", + " :return prob_dict (dict): dictionary defining the spatiotemporal model. 
Each key is a state, and each value is the\n", + " probability of that state.\n", + " \"\"\"\n", + " # create blank dictonary to store the results\n", + " prob_dict = {}\n", + " # read in labeled pdf file\n", + " old = open(pdf_file, 'r')\n", + " line = old.readline()\n", + " # store the path through various nodes, as well as the probability of that path\n", + " while line:\n", + " line_split = line.split()\n", + " # assumes the first string is the trajectory string, the second string is the probability\n", + " if len(line_split) > 1:\n", + " # use # for comments\n", + " if line_split[0]=='#':\n", + " pass\n", + " else:\n", + " trj = line_split[0]\n", + " prob = float(line_split[1])\n", + " # store in dictionary\n", + " prob_dict[trj] = prob\n", + " line = old.readline()\n", + " old.close()\n", + " return prob_dict\n", + "\n", + "def copy_number_from_state(prot_list,trj,custom_data_folder = None):\n", + " \"\"\"\n", + " For a trajectory, returns an array of protein copy numbers as a function of time. Used by\n", + " forward_model_copy_number().\n", + " :param prot_list (list): list of proteins in the model. These proteins are searched for in each config file.\n", + " :param trj (str): string defining a single trajectory.\n", + " :param custom_data_folder (str, optional): path to custom data folder. Defaults to None, which points to '../data/'\n", + " :return _prots (array): 2D array of protein copy numbers. The first index loops over the time,\n", + " while the second index value loops over the protein (ordered as A, B, C).\n", + " :return N (int): Number of time points in each trajectory.\n", + " \"\"\"\n", + " # find folder with config files\n", + " if custom_data_folder:\n", + " data_folder = custom_data_folder\n", + " else:\n", + " data_folder = 'data/'\n", + "\n", + " # split the trajectory into a list of individual states\n", + " state_list=trj.split('|')\n", + " state_list=state_list[:-1]\n", + "\n", + " N = len(state_list)\n", + " # Map from index to protein: 0 - A, 1- B, 2- C\n", + " _prots = np.zeros((N, len(prot_list)))\n", + "\n", + " # Grab _prots from .config file\n", + " for i in range(0, N):\n", + " prot_file = data_folder + state_list[i] + '.config'\n", + " to_read = open(prot_file, 'r')\n", + " line = to_read.readline()\n", + " while line:\n", + " # for each line, check if the protein is in that line\n", + " for prot_index in range(len(prot_list)):\n", + " if prot_list[prot_index] in line:\n", + " _prots[i, prot_index] += 1\n", + " line = to_read.readline()\n", + "\n", + " return _prots,N\n", + "\n", + "def forward_model_copy_number(prot_list,custom_labeled_pdf=None):\n", + " \"\"\"\n", + " Code to perform copy number analysis on each protein in the model. Writes output files where each row is ordered\n", + " according to the time point in the model and the first column is the mean copy number, while the second column is\n", + " the standard deviation in copy number.\n", + " :param prot_list (list): list of proteins in the model. These proteins are searched for in each config file.\n", + " :param custom_labeled_pdf (str, optional): path to custom labeled probability distribution file output by\n", + " spatiotemporal.create_DAG.\n", + " \"\"\"\n", + " # find folder with config files\n", + " if custom_labeled_pdf:\n", + " _labeled_pdf = custom_labeled_pdf\n", + " else:\n", + " _labeled_pdf = '../Trajectories_Modeling/output/labeled_pdf.txt'\n", + "\n", + " # Read in labeled_pdf file into a dictionary. 
Each trajectory is listed as a dictionary,\n", + " # with keys as the trajectory and the values as the probability of that trajectory\n", + " prob_dict = read_labeled_pdf(_labeled_pdf)\n", + "\n", + " # Loop over the full dictionary. Create a list with 2 values:\n", + " # 1) the probability of the state, 2) the protein copy number of that state.\n", + " key_list = prob_dict.keys()\n", + " prot_prob = []\n", + " for key in key_list:\n", + " CN,N_times = copy_number_from_state(prot_list,key)\n", + " prot_prob.append([prob_dict[key], CN])\n", + "\n", + " # Construct the full path to the output directory\n", + " dir_name = \"forward_model_copy_number\"\n", + " full_path = os.path.join(main_dir, dir_name)\n", + " os.makedirs(full_path, exist_ok=True)\n", + " os.chdir(full_path)\n", + "\n", + " # Determine copy number from the prot_prob\n", + " for index in range(len(prot_prob[0][1][0])):\n", + " copy_number = np.zeros((N_times, 2))\n", + " # calculate mean\n", + " for state in prot_prob:\n", + " for i in range(N_times):\n", + " copy_number[i, 0] += state[0] * state[1][i][index]\n", + " # calculate std deviation\n", + " for state in prot_prob:\n", + " for i in range(N_times):\n", + " # Calculate variance\n", + " copy_number[i, 1] += state[0] * ((state[1][i][index] - copy_number[i, 0]) ** 2)\n", + " # Take square root to get the standard deviation\n", + " copy_number[:, 1] = np.sqrt(copy_number[:, 1])\n", + " # save to file\n", + " np.savetxt('CN_prot_'+prot_list[index]+'.txt', copy_number, header='mean CN\\tstd CN')\n", + "\n", + "# 3b - comparison of the model to data used in modeling (copy number)\n", + "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", + "forward_model_copy_number(expected_subcomplexes,custom_labeled_pdf='output/labeled_pdf.txt')\n", + "print(\"Step 3b: copy number validation IS COMPLETED\")\n", + "print(\"\")\n", + "print(\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "de81dea5-9d98-4424-ba64-d7a2def893b9", + "metadata": {}, + "source": [ + "Here, we plot the comparison between the experimental data used in model construction and the set of trajectory models. This analysis includes the cross-correlation coefficient between the experimental EM density and the forward density of the set of sufficiently good scoring modeled structures in the highest weighted trajectory model (a), as well as comparisons between experimental and modeled protein copy numbers for proteins A (b), B (c), and C (d). Here, we see the model is in good agreement with the data used to construct it.\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "id": "305b6e15-1147-4706-bb26-44125961f9fb", + "metadata": {}, + "source": [ + "## Validation against data not used in model construction\n", + "\n", + "Finally, we aim to compare the model to data not used in model construction. Specifically, we reserved SAXS data for model validation. We aimed to compare the forward scattering profile from the centroid structural model of each snapshot model to the experimental profile. To make this comparison, we wrote functions that converted each centroid RMF to a PDB (`convert_rmfs`), copied the experimental SAXS profiles to the appropriate folder (`copy_SAXS_dat_files`), and ran [FoXS](https://integrativemodeling.org/tutorials/foxs/foxs.html) on each PDB to evaluate its agreement to the experimental profile (`process_foxs`)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d5cc728a-6159-4317-94eb-525dccc780aa", + "metadata": {}, + "outputs": [], + "source": [ + "# 4a - SAXS\n", + "\"\"\"\n", + "Comparing center models of the most dominant cluster for each snapshot (rmfs) to the SAXS data for each time point\n", + " can be done in two steps:\n", + "-converting rmfs to pdb files\n", + "-comparing pdbs of each snapshot to experimental SAXS profile using FoXS\n", + "\"\"\"\n", + "\n", + "def convert_rmfs(state_dict, model, custom_path=None):\n", + " \"\"\"\n", + " The purpose of this function is to automate the conversion of RMF files into PDB files for all the states from\n", + " state_dict. Created PDBs are further used in comparison of SAXS profiles using FoXS. Additionally, they can be\n", + " used for comparison to native PDB if available.\n", + "\n", + " :param state_dict (dict): dictionary that defines the spatiotemporal model.\n", + " The keys are strings that correspond to each time point in the\n", + " stepwise temporal process. Keys should be ordered according to the\n", + " steps in the spatiotemporal process. The values are integers that\n", + " correspond to the number of possible states at that timepoint.\n", + " :param model (str): An IMP (Integrative Modeling Platform) model object.\n", + " :param custom_path (optional - str): A custom path for the RMF file, allowing for flexibility in file location\n", + " (should be compliant with stat_dict).\n", + " \"\"\"\n", + "\n", + " for time in state_dict.keys():\n", + " for state in range(1, state_dict[time] + 1):\n", + " if custom_path:\n", + " sim_rmf = custom_path # option for custom path\n", + " else:\n", + " sim_rmf = f\"../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", + "\n", + " pdb_output = f\"snapshot{state}_{time}.pdb\" # define the output of converted .pdb file\n", + "\n", + " if os.path.exists(sim_rmf):\n", + " try:\n", + " rmf_fh = RMF.open_rmf_file_read_only(sim_rmf) # open rmf file for reading\n", + " rmf_hierarchy = IMP.rmf.create_hierarchies(rmf_fh, model)[0] # extract 1st hierarchy\n", + " IMP.atom.write_pdb_of_c_alphas(rmf_hierarchy, pdb_output) # write coordinates of CA to .pdb\n", + " print(f\"Finishing: snapshot{state}_{time}.pdb\")\n", + " except Exception as e:\n", + " print(f\"{sim_rmf} is empty or there is another problem: {e}\")\n", + "\n", + "\n", + "def copy_SAXS_dat_files(custom_src_dir = None):\n", + " \"\"\"\n", + " Copies all files ending with .dat from the specified directory to the current directory.\n", + "\n", + " :param custom_src_dir (optional - str): Path to the source directory\n", + " \"\"\"\n", + " if custom_src_dir:\n", + " src_dir = custom_src_dir\n", + " else:\n", + " src_dir = '../../../Input_Information/gen_SAXS'\n", + " try:\n", + " files = os.listdir(src_dir) # Get the list of all files in the src_dir directory\n", + " dat_files = [f for f in files if f.endswith('.dat')] # Filter out files that end with .dat\n", + "\n", + " # Copy each .dat file to the current directory, so FoXS can be used\n", + " for file_name in dat_files:\n", + " full_file_name = os.path.join(src_dir, file_name)\n", + " if os.path.isfile(full_file_name):\n", + " shutil.copy(full_file_name, os.getcwd())\n", + " # print(f\"Copied: {full_file_name} to {main_dir}\")\n", + "\n", + " print(\"All .dat files have been copied successfully...\")\n", + "\n", + " except Exception as e:\n", + " print(f\"An error occurred: {e}\")\n", + "\n", + "\n", + "def 
process_foxs(state_dict, custom_dat_file = None):\n", + " \"\"\"\n", + " This function automates the FoXS analysis for all specified time points in a single execution. It processes PDB\n", + " files generated by the convert_rmfs function and uses SAXS data copied with the copy_SAXS function. All of this\n", + " data should be present in the current running directory.\n", + " FoXS tutorial is available here: https://integrativemodeling.org/tutorials/foxs/foxs.html\n", + "\n", + " :param state_dict (dict): dictionary that defines the spatiotemporal model.\n", + " The keys are strings that correspond to each time point in the\n", + " stepwise temporal process. Keys should be ordered according to the\n", + " steps in the spatiotemporal process. The values are integers that\n", + " correspond to the number of possible states at that timepoint.\n", + " :param custom_dat_file (optional - str)): A custom name of SAXS files for each time point (should be\n", + " compliant with stat_dict)\n", + " \"\"\"\n", + "\n", + "\n", + " print(\"...lets proceed to FoXS\")\n", + "\n", + " for time in state_dict.keys():\n", + " try:\n", + " if state_dict[time] > 1:\n", + " # if there is more than one state in timepoint, FoXS creates fit.plt and it should be renamed\n", + " if custom_dat_file:\n", + " dat_file = custom_dat_file\n", + " else:\n", + " dat_file = f\"{time}_exp.dat\"\n", + "\n", + " pdb_files = \" \".join([f\"snapshot{state}_{time}.pdb\" for state in range(1, state_dict[time] + 1)])\n", + "\n", + " command1 = f\"foxs -r -g {pdb_files} {dat_file}\"\n", + " # example how FoXS command should look like: foxs -r -g snapshot1_0min.pdb snapshot2_0min.pdb snapshot3_0min.pdb 0min_exp.dat\n", + " os.system(command1)\n", + " print(f\"FoXS for {time} is calculated and ready to create a plot. Nr of states is: {state_dict[time]}\")\n", + "\n", + " command2 = f\"gnuplot fit.plt\" # create plot from .plt code\n", + " os.system(command2)\n", + "\n", + " command3 = f\"mv fit.plt {time}_FoXS.plt\" # rename .plt to avoid to be overwritten\n", + " os.system(command3)\n", + "\n", + " command4 = f\"mv fit.png {time}_FoXS.png\" # rename plot to avoid to be overwritten\n", + " os.system(command4)\n", + "\n", + " print(f\"Plot {time}_FoXS.png is created\")\n", + "\n", + " elif state_dict[time] == 1:\n", + " print(f\"There is only one state in {time}\")\n", + " dat_file1 = f\"{time}_exp.dat\"\n", + " pdb_file1 = f\"snapshot1_{time}.pdb\"\n", + "\n", + " command5 = f\"foxs -r -g {pdb_file1} {dat_file1}\"\n", + " os.system(command5)\n", + " print(f\"FoXS for {time} is calculated and ready to create a plot. Nr of states is: {state_dict[time]}\")\n", + "\n", + " command6 = f\"gnuplot snapshot1_{time}_{time}_exp.plt\"\n", + " os.system(command6)\n", + "\n", + " command7 = f\"mv snapshot1_{time}_{time}_exp.plt {time}_FoXS.plt\"\n", + " os.system(command7)\n", + "\n", + " command8 = f\"mv snapshot1_{time}_{time}_exp.png {time}_FoXS.png\"\n", + " os.system(command8)\n", + " else:\n", + " print(f\"There is no states in this timepoint. 
Check stat_dict.\")\n", + "\n", + " except Exception as e:\n", + " print(f\"FoXS can not be executed properly due to following problem: {e}\")\n", + "\n", + "\n", + "# 4a - SAXS\n", + "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", + "SAXS_output = \"./SAXS_comparison/\"\n", + "os.makedirs(SAXS_output, exist_ok=True)\n", + "os.chdir(SAXS_output)\n", + "model = IMP.Model()\n", + "convert_rmfs(state_dict, model)\n", + "copy_SAXS_dat_files(custom_src_dir='../../modeling/Input_Information/gen_SAXS')\n", + "process_foxs(state_dict)\n", + "print(\"Step 4a: SAXS validation IS COMPLETED\")\n", + "print(\"\")\n", + "print(\"\")" + ] + }, + { + "cell_type": "markdown", + "id": "bef8310b-99a2-4e71-b4b7-88c0dbb06def", + "metadata": {}, + "source": [ + "The output of this analysis is written to `SAXS_comparison`. Standard FoXS outputs are available for each snapshot model (`snapshot{state}_{time}.*`). In particular, the `.fit` files include the forward and experimental profiles side by side, with the $\\chi^2$ for the fit. Further, the `{time}_FoXS.*` files include the information for all snapshot models at that time point, including plots of each profile in comparison to the experimental profile (`{time}_FoXS.png`)." + ] + }, + { + "cell_type": "markdown", + "id": "d4ebc14c-1eba-45ff-b98e-2938e65057a4", + "metadata": {}, + "source": [ + "As our model was generated from synthetic data, the ground truth structure is known at each time point. In addition to validating the model by assessing its comparison to SAXS data, we could approximate the model accuracy by comparing the snapshot model to the PDB structure, although this comparison is not perfect as the PDB structure was used to inform the structure of *rigid bodies* in the snapshot model. To do so, we wrote a function (`RMSD`) that calculates the RMSD between each structural model and the orignal PDB. The function is too computationally expensive to run in this notebook, but is found in the `Trajectories/Trajectories_Assessment/` folder and is demonstrated below.\n", + "\n", + "\\code{.py}\n", + "## 4b - RMSD", + "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", + "pdb_path = \"../../Input_Information/PDB/3rpg.pdb\"\n", + "RMSD(pdb_path=pdb_path, custom_n_plot=20)\n", + "print(\"Step 4b: RMSD validation IS COMPLETED\")\n", + "print(\"\")\n", + "print(\"\")\n", + "\\endcode\n", + "\n", + "The output of this function is written in `RMSD_calculation_output`. The function outputs `rmsd_{state}_{time}.png` files, which plots the RMSD for each structural model within each snapshot model. This data is then summarized in `RMSD_analysis.txt`, which includes the minimum RMSD, average RMSD, and number of structural models in each snapshot model.\n" + ] + }, + { + "cell_type": "markdown", + "id": "64e1a2f6-7258-462d-bde3-7734743b5aa1", + "metadata": {}, + "source": [ + "Finally, we plot the results for assessing the spatiotemporal model with data not used to construct it. Comparisons are made between the centroid structure of the most populated cluster in each snapshot model at each time point and the experimental SAXS profile for 0 (a), 1 (b), and 2 (c) minutes. 
Further, we plot both the sampling precision (dark red) and the RMSD to the PDB structure (light red) for each snapshot model in the highest weighted trajectory model (d).\n", + "\n", + "\n", + "\n", + "To quantitatively compare the model to SAXS data, we used the $\chi^2$ to compare each snapshot model to the experimental profile. We note that the $\chi^2$ values are substantially lower for the models along the highest weighted trajectory model (1_0min, 1_1min, and 1_2min) than for other models, indicating that the highest weighted trajectory model is in better agreement with the experimental SAXS data than other possible trajectory models.\n", + "\n", + "\n", + "\n", + "Next, we can evaluate the accuracy of the model by comparing the RMSD to the PDB structure with the sampling precision of each snapshot model. We note that this is generally not possible, because in most biological applications the ground truth is not known. In this case, if the average RMSD to the PDB structure is smaller than the sampling precision, the PDB structure lies within the precision of the model. We find that the RMSD is within 1.5 \u00c5 of the sampling precision at all time points, indicating that the model lies within 1.5 \u00c5 of the ground truth.\n" + ] + }, + { + "cell_type": "markdown", + "id": "98f7a12c-d7e9-49ed-9a16-b713c3b4762a", + "metadata": {}, + "source": [ + "# Next steps\n", + "\n", + "After assessing our model, we must decide if the model is sufficient to answer biological questions of interest. If the model does not have sufficient precision for the desired application, assessment of the current model can be used to inform which new experiments may help improve the next iteration of the model. The [integrative spatiotemporal modeling procedure](https://integrativemodeling.org/tutorials/spatiotemporal/index.html#steps) can then be repeated iteratively, analogous to [integrative modeling of static structures](https://integrativemodeling.org/2.21.0/doc/manual/intro.html#procedure).\n", + "\n", + "If the model is sufficient to provide insight into the biological process of interest, the user may decide that it is ready for publication. In this case, the user should create an [mmCIF file](https://mmcif.wwpdb.org/) to deposit the model in the [PDB-dev database](https://pdb-dev.wwpdb.org/). 
This procedure is explained in the [deposition tutorial](https://integrativemodeling.org/tutorials/deposition/develop/).\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file diff --git a/Jupyter/template.spatiotemporal.ipynb b/Jupyter/spatiotemporal.ipynb similarity index 83% rename from Jupyter/template.spatiotemporal.ipynb rename to Jupyter/spatiotemporal.ipynb index c07d0fcdb..9761fb706 100644 --- a/Jupyter/template.spatiotemporal.ipynb +++ b/Jupyter/spatiotemporal.ipynb @@ -3,12 +3,14 @@ { "cell_type": "markdown", "id": "4a3dfff8-9d60-418c-a5fd-a4adf7a121b1", - "metadata": {}, + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, "source": [ - "IMP spatiotemporal tutorial {#notebook}\n", + "IMP spatiotemporal tutorial\n", "========\n", "\n", - "# Introduction {#notebook_introduction}\n", + "# Introduction\n", "\n", "Biomolecules are constantly in motion; therefore, a complete depiction of their function must include their dynamics instead of just static structures. We have developed an integrative spatiotemporal approach to model dynamic systems.\n", "\n", @@ -22,7 +24,7 @@ "- Latham, A.P.; Tempkin, J.O.B.; Otsuka, S.; Zhang, W.; Ellenberg, J.; Sali, A. bioRxiv, 2024, https://doi.org/10.1101/2024.08.06.606842.\n", "- Latham, A.P.; Rožič, M.; Webb, B.M., Sali, A. in preparation. (tutorial)\n", "\n", - "# Integrative spatiotemporal modeling workflow {#notebook_steps}\n", + "# Integrative spatiotemporal modeling workflow\n", "\n", "In general, integrative modeling proceeds through three steps (i. gathering information; ii. choosing the model representation, scoring alternative models, and searching for good scoring models; and iii. assessing the models). In integrative spatiotemporal modeling, these three steps are repeated for each modeling problem in the composite workflow (i. modeling of heterogeneity, ii. modeling of snapshots, and iii. modeling of a trajectory).\n", "\n", @@ -39,12 +41,12 @@ "id": "dc9d93b4-3b97-4187-867c-977f8353f4aa", "metadata": {}, "source": [ - "Modeling of heterogeneity {#notebook_heterogeneity}\n", + "Modeling of heterogeneity\n", "====================================\n", "\n", "Here, we describe the first modeling problem in our composite workflow, how to build models of heterogeneity modeling using IMP. In this tutorial, heterogeneity modeling only includes protein copy number; however, in general, other types of information, such as the coarse location in the final state, could also be included in heterogeneity models.\n", "\n", - "# Heterogeneity modeling step 1: gathering of information {#notebook_heterogeneity1}\n", + "# Heterogeneity modeling step 1: gathering of information\n", "\n", "We begin heterogeneity modeling with the first step of integrative modeling, gathering information. Heterogeneity modeling will rely on copy number information about the complex. 
In this case, we utilize the X-ray crystal structure of the fully assembled Bmi1/Ring1b-UbcH5c complex from the protein data bank (PDB), and synthetically generated protein copy numbers during the assembly process, which could be generated from experiments such as flourescence correlation spectroscopy (FCS).\n", "\n", @@ -52,35 +54,24 @@ "\n", "The PDB structure of the complex informs the final state of our model and constrains the maximum copy number for each protein, while the protein copy number data gives time-dependent information about the protein copy number in the assembling complex.\n", "\n", - "# Heterogeneity modeling step 2: representation, scoring function, and search process {#notebook_heterogeneity2}\n", + "# Heterogeneity modeling step 2: representation, scoring function, and search process\n", "\n", "Next, we represent, score and search for heterogeneity models models. A single heterogeneity model is a set of protein copy numbers, scored according to its fit to experimental copy number data at that time point. As ET and SAXS data, are only available at 0 minutes, 1 minute, and 2 minutes, we choose to create heterogeneity models at these three time points. We then use `prepare_protein_library`, to calculate the protein copy numbers for each snapshot model and to use the topology file of the full complex (`spatiotemporal_topology.txt`) to generate a topology file for each of these snapshot models. The choices made in this topology file are important for the representation, scoring function, and search process for snapshot models, and are discussed later. For heterogeneity modeling, we choose to model 3 protein copy numbers at each time point, and restrict the final time point to have the same protein copy numbers as the PDB structure.\n" ] }, { "cell_type": "code", - "execution_count": null, - "id": "66fae319-6215-4712-835d-b09606974260", - "metadata": {}, - "outputs": [], - "source": [ - "#%%colabonly\n", - "# For colab, we need to install IMP\n", - "!add-apt-repository -y ppa:salilab/ppa\n", - "!apt install imp\n", - "import sys, os, glob\n", - "sys.path.append(os.path.dirname(glob.glob('/usr/lib/python*/dist-packages/IMP')[0]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, + "execution_count": 49, "id": "1ff4d3e5-04de-4092-8cfc-2ad3018675da", "metadata": {}, "outputs": [], "source": [ "# General imports for the tutorial\n", "import sys, os, glob, shutil\n", + "import IMP\n", + "import IMP.atom\n", + "import RMF\n", + "import IMP.rmf\n", "from IMP.spatiotemporal import prepare_protein_library\n", "import IMP.spatiotemporal as spatiotemporal\n", "from IMP.spatiotemporal import analysis\n", @@ -90,18 +81,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 50, "id": "6b78a015-32e5-4701-8c6f-df14863ba9ce", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Successfully calculated the most likely configurations, and saved them to configuration and topology files.\n" + ] + } + ], "source": [ + "main_dir = os.getcwd()\n", + "os.chdir(main_dir)\n", "# parameters for prepare_protein_library:\n", "times = [\"0min\", \"1min\", \"2min\"]\n", - "exp_comp = {'A': '../../../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", - " 'B': '../../../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", - " 'C': '../../../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", + "exp_comp = {'A': '../modeling/Input_Information/gen_FCS/exp_compA.csv',\n", + " 'B': 
'../modeling/Input_Information/gen_FCS/exp_compB.csv',\n", + " 'C': '../modeling/Input_Information/gen_FCS/exp_compC.csv'}\n", "expected_subcomplexes = ['A', 'B', 'C']\n", - "template_topology = 'spatiotemporal_topology.txt'\n", + "template_topology = '../modeling/Heterogeneity/Heterogeneity_Modeling/spatiotemporal_topology.txt'\n", "template_dict = {'A': ['Ubi-E2-D3'], 'B': ['BMI-1'], 'C': ['E3-ubi-RING2']}\n", "nmodels = 3\n", "\n", @@ -119,7 +120,7 @@ "- *.config, a file with a list of proteins represented in the heterogeneity model\n", "- *_topol.txt, a topology file for snapshot modeling corresponding to this heterogeneity model.\n", "\n", - "# Heterogeneity modeling step 3: assessment {#notebook_heterogeneity_assess}\n", + "# Heterogeneity modeling step 3: assessment\n", "\n", "Now, we have a variety of heterogeneity models. In general, there are four ways to assess a model: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model. Here, we will focus specifically on comparing the model to experimental data, as other assessments will be performed later, when the trajectory models are assessed.\n", "\n", @@ -135,12 +136,12 @@ "id": "496f2687-bb05-4f51-8eab-a1170f6ac5fa", "metadata": {}, "source": [ - "Modeling of snapshots {#notebook_snapshots}\n", + "Modeling of snapshots\n", "====================================\n", "\n", "Here, we describe the second modeling problem in our composite workflow, how to build models of static snapshot models using IMP. We note that this process is similar to previous tutorials of [actin](https://integrativemodeling.org/tutorials/actin/) and [RNA PolII](https://integrativemodeling.org/tutorials/rnapolii_stalk/).\n", "\n", - "# Snapshot modeling step 1: gathering of information {#notebook_snapshots1}\n", + "# Snapshot modeling step 1: gathering of information\n", "\n", "We begin snapshot modeling with the first step of integrative modeling, gathering information. Snapshot modeling utilizes structural information about the complex. In this case, we utilize heterogeneity models, the X-ray crystal structure of the fully assembled Bmi1/Ring1b-UbcH5c complex from the protein data bank (PDB), synthetically generated electron tomography (ET) density maps during the assembly process, and physical principles.\n", "\n", @@ -148,15 +149,14 @@ "\n", "The heterogeneity models inform protein copy numbers for the snapshot models. The PDB structure of the complex informs the structure of the individual proteins. The time-dependent ET data informs the size and shape of the assembling complex. physical principles inform connectivity and excluded volume.\n", "\n", - "# Snapshot modeling step 2: representation, scoring function, and search process {#notebook_snapshots2}\n", + "# Snapshot modeling step 2: representation, scoring function, and search process\n", "\n", "Next, we represent, score and search for snapshot models. This step is quite computationally expensive. Therefore, we will not run the modeling protocol in this notebook, though the scripts are available in `modeling/Snapshots/Snapshots_Modeling/`. Here, we will simply describe the important steps made by two scripts. The first, `static_snapshot.py`, uses IMP to represent, score, and search for a single static snapshot model. 
The second, `start_sim.py`, automates the creation of a snapshot model for each heterogeneity model.\n", "\n", - "## Modeling one snapshot\n", - "\n", + "## Modeling one snapshot\n", "Here, we will describe the process of modeling a single snapshot model, as performed by running `static_snapshot.py`.\n", "\n", - "### Representing the model {#notebook_snapshot_representation}\n", + "### Representing the model\n", "\n", "We begin by representing the data and the model. In general, the *representation* of a system is defined by all the variables that need to be determined.\n", "\n", @@ -164,9 +164,9 @@ "\n", "Beads and Gaussians in our model belong to either a *rigid body* or *flexible string*. The positions of all beads and Gaussians in a single rigid body are constrained during sampling and do not move relative to each other. Meanwhile, flexible beads can move freely during sampling, but are restrained by sequence connectivity.\n", "\n", - "To begin, we built a topology file with the representation for the model of the complete system, `spatiotemporal_topology.txt`, located in `Heterogeneity/Heterogeneity_Modeling/`. This complete topology was used as a template to build topologies of each heterogeneity model. Based on our observation of the structure of the complex, we chose to represent each protein with at least 2 separate rigid bodies, and left the first 28 residues of protein C as flexible beads. Rigid bodies were described with 1 bead for every residue, and 10 residues per Gaussian. Flexible beads were described with 1 bead for every residue and 1 residue per Gaussian. A more complete description of the options available in topology files is available in the the [TopologyReader](@ref IMP::pmi::topology::TopologyReader) documentation.\n", + "To begin, we built a topology file with the representation for the model of the complete system, `spatiotemporal_topology.txt`, located in `Heterogeneity/Heterogeneity_Modeling/`. This complete topology was used as a template to build topologies of each heterogeneity model. Based on our observation of the structure of the complex, we chose to represent each protein with at least 2 separate rigid bodies, and left the first 28 residues of protein C as flexible beads. Rigid bodies were described with 1 bead for every residue, and 10 residues per Gaussian. Flexible beads were described with 1 bead for every residue and 1 residue per Gaussian. A more complete description of the options available in topology files is available in the the [TopologyReader](https://integrativemodeling.org/2.21.0/doc/ref/classIMP_1_1pmi_1_1topology_1_1TopologyReader.html) documentation.\n", "\n", - "\\code{.txt}\n", + "```\n", "|molecule_name | color | fasta_fn | fasta_id | pdb_fn | chain | residue_range | pdb_offset | bead_size | em_residues_per_gaussian | rigid_body | super_rigid_body | chain_of_super_rigid_bodies | \n", "\n", "|Ubi-E2-D3|blue|3rpg.fasta.txt|Ubi-E2-D3|3rpg.pdb|A|-1,18|2|1|10|1|1||\n", @@ -175,92 +175,89 @@ "|BMI-1|red|3rpg.fasta.txt|BMI-1|3rpg.pdb|B|84,101|-2|1|10|4|2||\n", "|E3-ubi-RING2|green|3rpg.fasta.txt|E3-ubi-RING2|BEADS|C|16,44|-15|1|1|5|3||\n", "|E3-ubi-RING2|green|3rpg.fasta.txt|E3-ubi-RING2|3rpg.pdb|C|45,116|-15|1|10|6|3||\n", - "\\endcode\n", + "```\n", "\n", "Next, we must prepare `static_snapshot.py` to read in this topology file. 
We begin by defining the input variables, `state` and `time`, which define which topology to use, as well as the paths to other pieces of input information.\n", "\n", - "\\code{.py}\n", - "### Running parameters to access correct path of ET_data for EM restraint\n", - "### and topology file for certain {state}_{time}_topol.txt\n", + "```python\n", + "# Running parameters to access correct path of ET_data for EM restraint\n", + "# and topology file for certain {state}_{time}_topol.txt\n", "state = sys.argv[1]\n", "time = sys.argv[2]\n", "\n", - "### Topology file\n", + "# Topology file\n", "topology_file = f\"../{state}_{time}_topol.txt\"\n", - "### Paths to input data for topology file\n", + "# Paths to input data for topology file\n", "pdb_dir = \"../../../../Input_Information/PDB\"\n", "fasta_dir = \"../../../../Input_Information/FASTA\"\n", - "### Path where forward gmms are created with BuildSystem (based ont topology file)\n", - "### If gmms exist, they will be used from this folder\n", + "# Path where forward gmms are created with BuildSystem (based ont topology file)\n", + "# If gmms exist, they will be used from this folder\n", "forward_gmm_dir = \"../forward_densities/\"\n", - "### Path to experimental gmms\n", + "# Path to experimental gmms\n", "exp_gmm_dir= '../../../../Input_Information/ET_data/add_noise'\n", - "\\endcode\n", + "```\n", "\n", "Next, we build the system, using the topology tile, described above.\n", - "\\code{.py}\n", - "### Create a system from a topology file. Resolution is set on 1.\n", + "```python\n", + "# Create a system from a topology file. Resolution is set on 1.\n", "bs = IMP.pmi.macros.BuildSystem(mdl, resolutions= 1, name= f'Static_snapshots_{state}_{time}')\n", "bs.add_state(t)\n", - "\\endcode\n", + "```\n", "\n", "Then, we prepare for later sampling steps by setting which Monte Carlo moves will be performed. Rotation (`rot`) and translation (`trans`) parameters are separately set for super rigid bodies (`srb`), rigid bodies (`rb`), and beads (`bead`).\n", - "\\code{.py}\n", - "### Macro execution: It gives hierarchy and degrees of freedom (dof).\n", - "### In dof we define how much can each (super) rigid body translate and rotate between two adjacent Monte Carlo steps\n", + "```python\n", + "# Macro execution: It gives hierarchy and degrees of freedom (dof).\n", + "# In dof we define how much can each (super) rigid body translate and rotate between two adjacent Monte Carlo steps\n", "root_hier, dof = bs.execute_macro(max_rb_trans=1.0,\n", " max_rb_rot=0.5, max_bead_trans=2.0,\n", " max_srb_trans=1.0, max_srb_rot=0.5)\n", - "\\endcode\n", + "```\n", "\n", - "### Scoring the model {#notebook_snapshot_scoring}\n", + "### Scoring the model\n", "\n", "After building the model representation, we choose a scoring function to score the model based on input information. 
This scoring function is represented as a series of restraints that serve as priors.\n", "\n", - "#### Connectivity\n", - "\n", + "#### Connectivity\n", "We begin with a connectivity restraint, which restrains beads adjacent in sequence to be close in 3D space.\n", "\n", - "\\code{.py}\n", - "#### Adding Restraints\n", - "#### Empty list where the data from restraints should be collected\n", + "```python\n", + "# Adding Restraints\n", + "# Empty list where the data from restraints should be collected\n", "output_objects=[]\n", "\n", - "#### Two common restraints: ConnectivityRestraint and ExcludedVolumeSphere\n", - "#### ConnectivityRestraint is added for each \"molecule\" separately\n", + "# Two common restraints: ConnectivityRestraint and ExcludedVolumeSphere\n", + "# ConnectivityRestraint is added for each \"molecule\" separately\n", "for m in root_hier.get_children()[0].get_children():\n", " cr = IMP.pmi.restraints.stereochemistry.ConnectivityRestraint(m)\n", " cr.add_to_model()\n", " output_objects.append(cr)\n", - "\\endcode\n", - "\n", - "#### Excluded volume\n", + "```\n", "\n", + "#### Excluded volume\n", "Next is an excluded volume restraint, which restrains beads to minimize their spatial overlap.\n", "\n", - "\\code{.py}\n", - "#### Add excluded volume\n", + "```python\n", + "# Add excluded volume\n", "evr = IMP.pmi.restraints.stereochemistry.ExcludedVolumeSphere(\n", " included_objects=[root_hier],\n", " resolution=1000)\n", "output_objects.append(evr)\n", "evr.add_to_model()\n", - "\\endcode\n", - "\n", - "#### Electron tomography\n", + "```\n", "\n", + "#### Electron tomography\n", "Finally, we restrain our models based on their fit to ET density maps. Both the experimental map and the forward protein density are represented as Gaussian mixture models (GMMs) to speed up scoring. The score is based on the log of the correlation coefficient between the experimental density and the forward protein density.\n", "\n", - "\\code{.py}\n", - "#### Applying time-dependent EM restraint. Point to correct gmm / mrc file at each time point\n", - "#### Path to corresponding .gmm file (and .mrc file)\n", + "```python\n", + "# Applying time-dependent EM restraint. Point to correct gmm / mrc file at each time point\n", + "# Path to corresponding .gmm file (and .mrc file)\n", "em_map = exp_gmm_dir + f\"/{time}_noisy.gmm\"\n", "\n", - "#### Create artificial densities from hierarchy\n", + "# Create artificial densities from hierarchy\n", "densities = IMP.atom.Selection(root_hier,\n", " representation_type=IMP.atom.DENSITIES).get_selected_particles()\n", "\n", - "#### Create EM restraint based on these densities\n", + "# Create EM restraint based on these densities\n", "emr = IMP.pmi.restraints.em.GaussianEMRestraint(\n", " densities,\n", " target_fn=em_map,\n", @@ -269,18 +266,18 @@ " weight=1000)\n", "output_objects.append(emr)\n", "emr.add_to_model()\n", - "\\endcode\n", + "```\n", "\n", - "### Searching for good scoring models {#notebook_snapshot_searching}\n", + "### Searching for good scoring models\n", "\n", "After building a scoring function that scores alternative models based on their fit to the input information, we aim to search for good scoring models. For complicated systems, stochastic sampling techniques such as Monte Carlo (MC) sampling are often the most efficient way to compute good scoring models. Here, we generate a random initial configuration and then perform temperature replica exchange MC sampling with 16 temperatures from different initial configurations. 
By performing multiple runs of replica exchange MC from different initial configurations, we can later ensure that our sampling is sufficiently converged.\n", "\n", - "\\code{.py}\n", - "### Generate random configuration\n", + "```python\n", + "# Generate random configuration\n", "IMP.pmi.tools.shuffle_configuration(root_hier,\n", " max_translation=50)\n", "\n", - "### Perform replica exchange sampling\n", + "# Perform replica exchange sampling\n", "rex=IMP.pmi.macros.ReplicaExchange(mdl,\n", " root_hier=root_hier,\n", " monte_carlo_sample_objects=dof.get_movers(),\n", @@ -289,54 +286,54 @@ " monte_carlo_steps=200, # Number of MC steps between writing frames.\n", " number_of_best_scoring_models=0,\n", " number_of_frames=500) # number of frames to be saved\n", - "### In our case, for each snapshot we generated 25000 frames altogether (50*500)\n", + "# In our case, for each snapshot we generated 25000 frames altogether (50*500)\n", "rex.execute_macro()\n", - "\\endcode\n", + "```\n", "\n", "After performing sampling, a variety of outputs will be created. These outputs include `.rmf` files, which contain multi-resolution models output by IMP, and `.out` files which contains a variety of information about the run such as the value of the restraints and the MC acceptance rate.\n", "\n", - "## Generalizing modeling to all snapshots {#notebook_snapshot_combine}\n", + "## Generalizing modeling to all snapshots\n", "\n", "Next, we will describe the process of computing multiple static snapshot models, as performed by running `start_sim.py`.\n", "\n", "From heterogeneity modeling, we see that there are 3 heterogeneity models at each time point (it is possible to have more snapshot models than copy numbers if multiple copies of the protein exist in the complex), each of which has a corresponding topology file in `Heterogeneity/Heterogeneity_Modeling/`. We wrote a function, `generate_all_snapshots`, which creates a directory for each snapshot model, copies the python script and topology file into that directory, and submits a job script to run sampling. 
The job script will likely need to be customized for the user's computer or cluster.\n", "\n", - "\\code{.py}\n", - "## 1a - parameters for generate_all_snapshots\n", - "## state_dict - universal parameter\n", + "```python\n", + "# 1a - parameters for generate_all_snapshots\n", + "# state_dict - universal parameter\n", "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", "\n", "main_dir = os.getcwd()\n", "topol_dir = os.path.join(os.getcwd(), '../../Heterogeneity/Heterogeneity_Modeling')\n", "items_to_copy = ['static_snapshot.py'] # additionally we need to copy only specific topology file\n", - "## jobs script will likely depend on the user's cluster / configuration\n", + "# jobs script will likely depend on the user's cluster / configuration\n", "job_template = (\"#!/bin/bash\\n#$ -S /bin/bash\\n#$ -cwd\\n#$ -r n\\n#$ -j y\\n#$ -N Tutorial\\n#$ -pe smp 16\\n\"\n", " \"#$ -l h_rt=48:00:00\\n\\nmodule load Sali\\nmodule load imp\\nmodule load mpi/openmpi-x86_64\\n\\n\"\n", " \"mpirun -np $NSLOTS python3 static_snapshot.py {state} {time}\")\n", "number_of_runs = 50\n", "\n", - "## 1b - calling generate_all_snapshots\n", + "# 1b - calling generate_all_snapshots\n", "generate_all_snapshots(state_dict, main_dir, topol_dir, items_to_copy, job_template, number_of_runs)\n", "\n", - "\\endcode\n", + "```\n", "\n", - "# Snapshot modeling step 3: assessment {#notebook_snapshot_assess}\n", + "# Snapshot modeling step 3: assessment\n", "\n", "The above code would generate a variety of alternative snapshot models. In general, we would like to assess these models in at least 4 ways: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model. In this portion of the tutorial, we focus specifically on estimating the sampling precision of the model, while quantitative comparisons between the model and experimental data will be reserved for the final step, when we assess trajectories. Again, this assessment process is quite computationally intensive, so, instead of running the script explicitly, we will walk you through the `snapshot_assessment.py` script, which is located in the `modeling/Snapshots/Snapshots_Assessment` folder.\n", "\n", - "## Filtering good scoring models {#notebook_snapshot_filter}\n", + "## Filtering good scoring models\n", "\n", "Initially, we want to filter the various alternative structural models to only select those that meet certain parameter thresholds. In this case, we filter the structural models comprising each snapshot model by the median cross correlation with EM data. We note that this filtering criterion is subjective, and developing a Bayesian method to objectively weigh different restraints for filtering remains an interesting future development in integrative modeling.\n", "\n", "The current filtering procedure involves three steps. In the first step, we look through the `stat.*.out` files to write out the cross correlation with EM data for each model, which, in this case, is labeled column `3`, `GaussianEMRestraint_None_CCC`. In other applications, the column that corresponds to each type of experimental data may change, depending on the scoring terms for each model. 
For each snapshot model, a new file is written with this data (`{state}_{time}_stat.txt`).\n", "\n", - "\\code{.py}\n", - "## state_dict - universal parameter\n", + "```python\n", + "# state_dict - universal parameter\n", "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", - "## current directory\n", + "# current directory\n", "main_dir = os.getcwd()\n", "\n", - "## 1 calling extracting_stat_files function and related parameters\n", + "# 1 calling extracting_stat_files function and related parameters\n", "keys_to_extract = [3]\n", "runs_nr = 50\n", "replica_nr = 16\n", @@ -347,12 +344,12 @@ "print(\"extracting_stat_files is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "In the second step, we want to determine the median value of EM cross correlation for each snapshot model. We wrote `general_rule_calculation` to look through the `general_rule_column` for each `{state}_{time}_stat.txt` file and determine both the median value and the number of structures generated.\n", "\n", - "\\code{.py}\n", - "## 2 calling general_rule_calculation and related parameters\n", + "```python\n", + "# 2 calling general_rule_calculation and related parameters\n", "general_rule_column = '3'\n", "\n", "general_rule_calculation(state_dict, general_rule_column)\n", @@ -360,24 +357,24 @@ "print(\"general_rule_calculation is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "In the third step, we use the `imp_sampcon select_good` tool to filter each snapshot model, according to the median value determined in the previous step. For each snapshot model, this function produces a file, `good_scoring_models/model_ids_scores.txt`, which contains the run, replicaID, scores, and sampleID for each model that passes filtering. It also saves RMF files with each model from two independent groups of sampling runs from each snapshot model to `good_scoring_models/sample_A` and `good_scoring_models/sample_B`, writes the scores for the two independent groups of sampling runs to `good_scoring_models/scoresA.txt` and `good_scoring_models/scoresB.txt`, and writes `good_scoring_models/model_sample_ids.txt` to connect each model to its division of sampling run. More information on `imp_sampcon` is available in the analysis portion of the [actin tutorial](https://integrativemodeling.org/tutorials/actin/analysis.html).\n", "\n", - "\\code{.py}\n", - "## 3 calling general_rule_filter_independent_samples\n", + "```python\n", + "# 3 calling general_rule_filter_independent_samples\n", "general_rule_filter_independent_samples(state_dict, main_dir)\n", "print(\"general_rule_filter_independent_samples is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", - "## Plotting data, clustering models, and determining sampling precision {#notebook_snapshot_sampling_precision}\n", + "## Plotting data, clustering models, and determining sampling precision\n", "\n", "Next, scores can be plotted for analysis. Here, we wrote the `create_histograms` function to run `imp_sampcon plot_score` so that it plots distributions for various scores of interest. Each of these plots are saved to `histograms{state}_{time}/{score}.png`, where score is an object listed in the `score_list`. 
These plots are useful for debugging the modeling protocol, and should appear roughly Gaussian.\n", "\n", - "\\code{.py}\n", - "## 4 calling create_histograms and related parameters\n", + "```python\n", + "# 4 calling create_histograms and related parameters\n", "score_list = [\n", " 'Total_Score',\n", " 'ConnectivityRestraint_Score',\n", @@ -390,37 +387,37 @@ "print(\"create_histograms is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "We then check the number of models in each sampling run though our function, `count_rows_and_generate_report`, which writes the `independent_samples_stat.txt` file. Empirically, we have found that ensuring the overall number of models in each independent sample after filtering is roughly equal serves a good first check on sampling convergence.\n", "\n", - "\\code{.py}\n", - "## 5 calling count_rows_and_generate_report\n", + "```python\n", + "# 5 calling count_rows_and_generate_report\n", "count_rows_and_generate_report(state_dict)\n", "print(\"count_rows_and_generate_report is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "Next, we write the density range dictionaries, which are output as `{state}_{time}_density_ranges.txt`. These dictionaries label each protein in each snapshot model, which will be passed into `imp_sampcon` to calculate the localization density of each protein.\n", "\n", - "\\code{.py}\n", - "## 6 calling create_density_dictionary:\n", + "```python\n", + "# 6 calling create_density_dictionary:\n", "create_density_dictionary_files(state_dict, main_dir)\n", "print(\"create_density_dictionary is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "Next, we run `imp_sampcon exhaust` on each snapshot model. This code performs checks on the exhaustiveness of the sampling. Specifically it analyzes the convergence of the model score, whether the two model sets were drawn from the same distribution, and whether each structural cluster includes models from each sample proportionally to its size. The output for each snapshot model is written out to the `exhaust_{state}_{time}` folder.\n", "\n", - "\\code{.py}\n", - "## 7 calling exhaust\n", + "```python\n", + "# 7 calling exhaust\n", "exhaust(state_dict, main_dir)\n", "print(\"exhaust is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "Plots for determining the sampling precision are shown below for a single snapshot model, 1_2min. (a) Tests the convergence of the lowest scoring model (`snapshot_{state}_{time}.Top_Score_Conv.pdf`). Error bars represent standard deviations of the best scores, estimated by selecting different subsets of models 10 times. The light-blue line indicates a lower bound reference on the total score. (b) Tests that the scores of two independently sampled models come from the same distribution (`snapshot_{state}_{time}.Score_Dist.pdf`). The difference between the two distributions, as measured by the KS test statistic (D) and KS test p-value (p) indicates that the difference is both statistically insignificant (p>0.05) and small in magnitude (D<0.3). (c) Determines the structural precision of a snapshot model (`snapshot_{state}_{time}.ChiSquare.pdf`). RMSD clustering is performed at 1 Å intervals until the clustered population (% clustered) is greater than 80%, and either the χ2 p-value is greater than 0.05 or Cramer’s V is less than 0.1. The sampling precision is indicated by the dashed black line. 
(d) Populations from sample 1 and sample 2 are shown for each cluster (`snapshot_{state}_{time}.Cluster_Population.pdf`).\n", "\n", @@ -430,25 +427,25 @@ "\n", "Ideally, each of these plots should be checked for each snapshot model. As a way to summarize the output of these checks, we can gather the results of the KS test and the sampling precision test for all snapshot models. This is done by running `extract_exhaust_data` and `save_exhaust_data_as_png`, which write `KS_sampling_precision_output.txt` and `KS_sampling_precision_output.png`, respectively.\n", "\n", - "\\code{.py}\n", - "## 8 calling extract_exhaust_data\n", + "```python\n", + "# 8 calling extract_exhaust_data\n", "extract_exhaust_data(state_dict)\n", "print(\"extract_exhaust_data is DONE\")\n", "print(\"\")\n", "print(\"\")\n", "\n", - "## 9 calling save_exhaust_data_as_png\n", + "# 9 calling save_exhaust_data_as_png\n", "save_exhaust_data_as_png()\n", "print(\"save_exhaust_data_as_png is DONE\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "These codes write a table that include the KS two sample test statistic (D), the KS test p-value, and the sampling precision for each snapshot model, which is replotted below.\n", "\n", "\n", "\n", - "## Visualizing models {#notebook_snapshot_visualization}\n", + "## Visualizing models\n", "\n", "The resulting RMF files and localization densities from this analysis can be viewed in [UCSF Chimera](https://www.rbvi.ucsf.edu/chimera/) (version>=1.13) or [UCSF ChimeraX](https://www.cgl.ucsf.edu/chimerax/).\n", "\n", @@ -464,12 +461,12 @@ "id": "6fc2546f-7c6f-4146-8c9b-ee143fcead6e", "metadata": {}, "source": [ - "Modeling of a Trajectory {#notebook_trajectories}\n", + "Modeling of a Trajectory\n", "====================================\n", "\n", "Here, we describe the final modeling problem in our composite workflow, how to build models of trajectory models using IMP.\n", "\n", - "# Trajectory modeling step 1: gathering of information {#notebook_trajectories1}\n", + "# Trajectory modeling step 1: gathering of information\n", "\n", "We begin trajectory modeling with the first step of integrative modeling, gathering information. Trajectory modeling utilizes dynamic information about the bimolecular process. In this case, we utilize heterogeneity models, snapshot models, physical theories, and synthetically generated small-angle X-ray scattering (SAXS) profiles.\n", "\n", @@ -477,17 +474,16 @@ "\n", "Heterogeneity models inform the possible compositional states at each time point and measure how well a compositional state agrees with input information. Snapshot models provide structural models for each heterogeneity model and measure how well those structural models agree with input information about their structure. Physical theories of macromolecular dynamics inform transitions between states. 
SAXS data informs the size and shape of the assembling complex and is left for validation.\n", "\n", - "# Trajectory modeling step 2: representation, scoring function, and search process {#notebook_trajectories2}\n", + "# Trajectory modeling step 2: representation, scoring function, and search process\n", "\n", "Trajectory modeling connects alternative snapshot models at adjacent time points, followed by scoring the trajectory models based on their fit to the input information, as described in full [here](https://www.biorxiv.org/content/10.1101/2024.08.06.606842v1.abstract).\n", "\n", - "## Background behind integrative spatiotemporal modeling\n", - "\n", - "### Representing the model {#notebook_trajectory_representation}\n", + "## Background behind integrative spatiotemporal modeling\n", + "### Representing the model\n", "\n", "We choose to represent dynamic processes as a trajectory of snapshot models, with one snapshot model at each time point. In this case, we computed snapshot models at 3 time points (0, 1, and 2 minutes), so a single trajectory model will consist of 3 snapshot models, one at each 0, 1, and 2 minutes. The modeling procedure described here will produce a set of scored trajectory models, which can be displayed as a directed acyclic graph, where nodes in the graph represent the snapshot model and edges represent connections between snapshot models at neighboring time points.\n", "\n", - "### Scoring the model {#notebook_trajectory_scoring}\n", + "### Scoring the model\n", "\n", "To score trajectory models, we incorporate both the scores of individual snapshot models, as well as the scores of transitions between them. Under the assumption that the process is Markovian (*i.e.* memoryless), the weight of a trajectory model takes the form:\n", "\n", @@ -495,23 +491,38 @@ "W(\\chi) \\propto \\displaystyle\\prod^{T}_{t=0} P( X_{N,t}, N_{t} | D_{t}) \\cdot \\displaystyle\\prod^{T-1}_{t=0} W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1}),\n", "$$\n", "\n", - "where $t$ indexes times from 0 until the final modeled snapshot ($T$); $P(X_{N,t}, N_{t} | D_{t})$ is the snapshot model score; and \\f$W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1})\\f$ is the transition score. Trajectory model weights ($W(\\chi)$) are normalized so that the sum of all trajectory models' weights is 1.0. Transition scores are currently based on a simple metric that either allows or disallows a transition. Transitions are only allowed if all proteins in the first snapshot model are included in the second snapshot model. In the future, we hope to include more detailed transition scoring terms, which may take into account experimental information or physical models of macromolecular dynamics.\n", + "where $t$ indexes times from 0 until the final modeled snapshot ($T$); $P(X_{N,t}, N_{t} | D_{t})$ is the snapshot model score; and $W(X_{N,t+1},N_{t+1} | X_{N,t},N_{t}, D_{t,t+1})$ is the transition score. Trajectory model weights ($W(\\chi)$) are normalized so that the sum of all trajectory models' weights is 1.0. Transition scores are currently based on a simple metric that either allows or disallows a transition. Transitions are only allowed if all proteins in the first snapshot model are included in the second snapshot model. 
In the future, we hope to include more detailed transition scoring terms, which may take into account experimental information or physical models of macromolecular dynamics.\n", "\n", - "### Searching for good scoring models {#notebook_trajectory_searching}\n", + "### Searching for good scoring models\n", "\n", "Trajectory models are constructed by enumerating all connections between adjacent snapshot models and scoring these trajectory models according to the equation above. This procedure results in a set of weighted trajectory models.\n", "\n", - "## Computing trajectory models\n", - "\n", + "## Computing trajectory models\n", "To compute trajectory models, we first copy all necessary files to a new directory, `data`. These files are (i) `{state}_{time}.config` files, which include the subcomplexes that are in each state, (ii) `{state}_{time}_scores.log`, which is a list of all scores of all structural models in that snapshot model, and (iii) `exp_comp{prot}.csv`, which is the experimental copy number for each protein (`{prot}`) as a function of time. Here, we copy files related to the snapshots (`*.log` files) from the `modeling` directory, as we skipped computing snapshots due to the computational expense.\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 51, "id": "bb887efe-0630-47b6-9bf1-85fa216a6816", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ".config files are copied\n", + ".csv stoichiometry files are copied\n", + "Scores for snapshot1_0min have been merged and saved\n", + "Scores for snapshot2_0min have been merged and saved\n", + "Scores for snapshot3_0min have been merged and saved\n", + "Scores for snapshot1_1min have been merged and saved\n", + "Scores for snapshot2_1min have been merged and saved\n", + "Scores for snapshot3_1min have been merged and saved\n", + "Scores for snapshot1_2min have been merged and saved\n" + ] + } + ], "source": [ "def merge_scores(fileA, fileB, outputFile):\n", " \"\"\"\n", @@ -630,10 +641,9 @@ "\n", "# copy all the relevant files for create_DAG\n", "# it is important that everything starts from main dir\n", - "main_dir = os.getcwd()\n", "os.chdir(main_dir)\n", "state_dict = {'0min': 3, '1min': 3, '2min': 1}\n", - "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../../../modeling/Input_Information/gen_FCS/', custom_source_dir3='../../../modeling/Snapshots/Snapshots_Modeling/')\n", + "create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../modeling/Input_Information/gen_FCS/', custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", "\n", "# then trajectory model is created based on the all copied data\n", "expected_subcomplexes = ['A', 'B', 'C']\n", @@ -663,10 +673,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 52, "id": "819bb205-fa52-42f9-a4ab-d2f7c3cff0ad", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initialing graph...\n", + "Done.\n", + "Calculation composition likelihood...\n", + "Done.\n", + "Scoring directed acycling graph...\n", + "Done.\n", + "Writing output...\n", + "Done.\n" + ] + } + ], "source": [ "nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scores.log',\n", @@ -698,13 +723,12 @@ "id": "fb4c016e-e5ba-4e3b-a76e-499750abb782", "metadata": {}, "source": [ - 
"\\image html Spatiotemporal_Model.png width=600px\n", "\n", - "# Trajectory modeling step 3: assessment {#notebook_trajectory_assess}\n", + "# Trajectory modeling step 3: assessment\n", "\n", "Now that the set of spatiotemporal models has been constructed, we must evaluate these models. We can evaluate these models in at least 4 ways: estimate the sampling precision, compare the model to data used to construct it, validate the model against data not used to construct it, and quantify the precision of the model.\n", "\n", - "## Sampling precision {#notebook_trajectory_sampling_precision}\n", + "## Sampling precision\n", "\n", "To begin, we calculate the sampling precision of the models. The sampling precision is calculated by using `spatiotemporal.create_DAG` to reconstruct the set of trajectory models using 2 independent sets of samplings for snapshot models. Then, the overlap between these snapshot models is evaluated using `analysis.temporal_precision`, which takes in two `labeled_pdf` files.\n", "\n", @@ -713,10 +737,54 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 53, "id": "f1436e33-81b6-400c-bf9c-7b667455265a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ".config files are copied\n", + ".csv stoichiometry files are copied\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot1_0min/good_scoring_models/scoresA.txt to ./data/1_0min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot1_0min/good_scoring_models/scoresB.txt to ./data/1_0min_scoresB.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot2_0min/good_scoring_models/scoresA.txt to ./data/2_0min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot2_0min/good_scoring_models/scoresB.txt to ./data/2_0min_scoresB.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot3_0min/good_scoring_models/scoresA.txt to ./data/3_0min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot3_0min/good_scoring_models/scoresB.txt to ./data/3_0min_scoresB.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot1_1min/good_scoring_models/scoresA.txt to ./data/1_1min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot1_1min/good_scoring_models/scoresB.txt to ./data/1_1min_scoresB.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot2_1min/good_scoring_models/scoresA.txt to ./data/2_1min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot2_1min/good_scoring_models/scoresB.txt to ./data/2_1min_scoresB.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot3_1min/good_scoring_models/scoresA.txt to ./data/3_1min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot3_1min/good_scoring_models/scoresB.txt to ./data/3_1min_scoresB.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot1_2min/good_scoring_models/scoresA.txt to ./data/1_2min_scoresA.log\n", + "Copied ../modeling/Snapshots/Snapshots_Modeling/snapshot1_2min/good_scoring_models/scoresB.txt to ./data/1_2min_scoresB.log\n", + "Initialing graph...\n", + "Done.\n", + "Calculation composition likelihood...\n", + "Done.\n", + "Scoring directed acycling graph...\n", + "Done.\n", + "Writing output...\n", + "Done.\n", + "Initialing graph...\n", + "Done.\n", + "Calculation composition likelihood...\n", + "Done.\n", + "Scoring directed acycling graph...\n", + "Done.\n", + "Writing 
output...\n", + "Done.\n", + "Temporal precision between ../output_modelA/labeled_pdf.txt and ../output_modelB/labeled_pdf.txt:\n", + "1.0\n", + "Step 1: calculation of temporal precision IS COMPLETED\n", + "\n", + "\n" + ] + } + ], "source": [ "## 1 - calculation of temporal precision\n", "\n", @@ -805,10 +873,11 @@ " except Exception as e:\n", " print(f\"scoresA.txt and scoresB.txt cannot be copied. Try do do it manually. Reason for Error: {e}\")\n", "\n", + "os.chdir(main_dir)\n", "# copy all the relevant files\n", - "copy_files_for_data(state_dict, custom_source_dir1='../../../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", - " custom_source_dir2='../../../modeling/Input_Information/gen_FCS/',\n", - " custom_source_dir3='../../../modeling/Snapshots/Snapshots_Modeling/')\n", + "copy_files_for_data(state_dict, custom_source_dir1='../modeling/Heterogeneity/Heterogeneity_Modeling/',\n", + " custom_source_dir2='../modeling/Input_Information/gen_FCS/',\n", + " custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')\n", "\n", "# create two independent DAGs\n", "expected_subcomplexes = ['A', 'B', 'C']\n", @@ -824,7 +893,7 @@ "nodesA, graphA, graph_probA, graph_scoresA = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scoresA.log',\n", " output_dir=outputA,\n", - " spatio_temporal_rule=False,\n", + " spatio_temporal_rule=True,\n", " expected_subcomplexes=expected_subcomplexes,\n", " score_comp=True, exp_comp_map=exp_comp,\n", " draw_dag=False)\n", @@ -833,7 +902,7 @@ "nodesB, graphB, graph_probB, graph_scoresB = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,\n", " input_dir=input, scorestr='_scoresB.log',\n", " output_dir=outputB,\n", - " spatio_temporal_rule=False,\n", + " spatio_temporal_rule=True,\n", " expected_subcomplexes=expected_subcomplexes,\n", " score_comp=True, exp_comp_map=exp_comp,\n", " draw_dag=False)\n", @@ -852,7 +921,7 @@ "id": "6ce48a52-b06c-4e09-80a5-debe0fd2cba2", "metadata": {}, "source": [ - "## Model precision {#notebook_trajectory_precision}\n", + "## Model precision\n", "\n", "Next, we calculate the precision of the model, using `analysis.precision`. Here, the model precision calculates the number of trajectory models with high weights. The precision ranges from 1.0 to 1/d, where d is the number of trajectory models. Values approaching 1.0 indicate the model set can be described by a single trajectory model, while values close to 1/d indicate that all trajectory models have similar weights.\n", "\n", @@ -861,10 +930,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 54, "id": "d3669407-4ddc-4e1f-b8ba-c8638024cd56", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Precision of ./output/labeled_pdf.txt\n", + "1.0\n", + "Step 2: calculation of precision of the model IS COMPLETED\n", + "\n", + "\n" + ] + } + ], "source": [ "## 2 - calculation of precision of the model\n", "\n", @@ -884,18 +965,18 @@ "id": "8a44c8b5-93d8-4ab8-b8cc-d6a70168c0eb", "metadata": {}, "source": [ - "## Comparison against data used in model construction {#notebook_trajectory_comparison}\n", + "## Comparison against data used in model construction\n", "\n", "We then evaluate the model against data used in model construction. First, we can calculate the cross-correlation between the original EM map and the forward density projected from each snapshot model. 
This calculation is too computationally expensive for this notebook, but can be found in `modeling/Trajectories/Trajectories_Assessment`, where we wrote the `ccEM` function to perform this comparison for all snapshot models.\n", "\n", - "\\code{.py}\n", - "## 3a - comparison of the model to data used in modeling (EM)\n", + "```python\n", + "# 3a - comparison of the model to data used in modeling (EM)\n", "exp_mrc_base_path = \"../../Input_Information/ET_data/add_noise\"\n", "ccEM(exp_mrc_base_path)\n", "print(\"Step 3a: ET validation IS COMPLETED\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "The results of this comparison are shown below." ] @@ -910,10 +991,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 55, "id": "f3f7c451-2b04-4e8b-b39c-16c493e8a1a6", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Step 3b: copy number validation IS COMPLETED\n", + "\n", + "\n" + ] + } + ], "source": [ "def read_labeled_pdf(pdf_file):\n", " \"\"\"\n", @@ -996,7 +1087,7 @@ " \"\"\"\n", " # find folder with config files\n", " if custom_labeled_pdf:\n", - " _labeled_pdf = custom_data_folder\n", + " _labeled_pdf = custom_labeled_pdf\n", " else:\n", " _labeled_pdf = '../Trajectories_Modeling/output/labeled_pdf.txt'\n", "\n", @@ -1037,7 +1128,7 @@ "\n", "# 3b - comparison of the model to data used in modeling (copy number)\n", "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", - "forward_model_copy_number(expected_subcomplexes)\n", + "forward_model_copy_number(expected_subcomplexes,custom_labeled_pdf='output/labeled_pdf.txt')\n", "print(\"Step 3b: copy number validation IS COMPLETED\")\n", "print(\"\")\n", "print(\"\")" @@ -1058,17 +1149,96 @@ "id": "305b6e15-1147-4706-bb26-44125961f9fb", "metadata": {}, "source": [ - "## Validation against data not used in model construction {#notebook_trajectory_validation}\n", + "## Validation against data not used in model construction\n", "\n", "Finally, we aim to compare the model to data not used in model construction. Specifically, we reserved SAXS data for model validation. We aimed to compare the forward scattering profile from the centroid structural model of each snapshot model to the experimental profile. To make this comparison, we wrote functions that converted each centroid RMF to a PDB (`convert_rmfs`), copied the experimental SAXS profiles to the appropriate folder (`copy_SAXS_dat_files`), and ran [FoXS](https://integrativemodeling.org/tutorials/foxs/foxs.html) on each PDB to evaluate its agreement to the experimental profile (`process_foxs`)." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 56, "id": "d5cc728a-6159-4317-94eb-525dccc780aa", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Finishing: snapshot1_0min.pdb\n", + "Finishing: snapshot2_0min.pdb\n", + "Finishing: snapshot3_0min.pdb\n", + "Finishing: snapshot1_1min.pdb\n", + "Finishing: snapshot2_1min.pdb\n", + "Finishing: snapshot3_1min.pdb\n", + "Finishing: snapshot1_2min.pdb\n", + "All .dat files have been copied successfully...\n", + "...lets proceed to FoXS\n", + "begin read_pdb:\n", + " WARNING No atoms were read from snapshot1_0min.pdb; perhaps it is not a PDB file.\n", + "end read_pdb\n", + "WARNING can't parse input file snapshot1_0min.pdb\n", + "FoXS for 0min is calculated and ready to create a plot. 
Nr of states is: 3\n", + "Plot 0min_FoXS.png is created\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "line 0: Cannot load input from 'fit.plt'\n", + "\n", + "/usr/local/bin/mv: cannot stat 'fit.plt': No such file or directory\n", + "/usr/local/bin/mv: cannot stat 'fit.png': No such file or directory\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "begin read_pdb:\n", + " WARNING No atoms were read from snapshot1_1min.pdb; perhaps it is not a PDB file.\n", + "end read_pdb\n", + "WARNING can't parse input file snapshot1_1min.pdb\n", + "FoXS for 1min is calculated and ready to create a plot. Nr of states is: 3\n", + "Plot 1min_FoXS.png is created\n", + "There is only one state in 2min\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "line 0: Cannot load input from 'fit.plt'\n", + "\n", + "/usr/local/bin/mv: cannot stat 'fit.plt': No such file or directory\n", + "/usr/local/bin/mv: cannot stat 'fit.png': No such file or directory\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "begin read_pdb:\n", + " WARNING No atoms were read from snapshot1_2min.pdb; perhaps it is not a PDB file.\n", + "end read_pdb\n", + "WARNING can't parse input file snapshot1_2min.pdb\n", + "FoXS for 2min is calculated and ready to create a plot. Nr of states is: 1\n", + "Step 4a: SAXS validation IS COMPLETED\n", + "\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "line 0: Cannot load input from 'snapshot1_2min_2min_exp.plt'\n", + "\n", + "/usr/local/bin/mv: cannot stat 'snapshot1_2min_2min_exp.plt': No such file or directory\n", + "/usr/local/bin/mv: cannot stat 'snapshot1_2min_2min_exp.png': No such file or directory\n" + ] + } + ], "source": [ "# 4a - SAXS\n", "\"\"\"\n", @@ -1099,7 +1269,7 @@ " if custom_path:\n", " sim_rmf = custom_path # option for custom path\n", " else:\n", - " sim_rmf = f\"../../../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", + " sim_rmf = f\"../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3\"\n", "\n", " pdb_output = f\"snapshot{state}_{time}.pdb\" # define the output of converted .pdb file\n", "\n", @@ -1215,12 +1385,14 @@ "SAXS_output = \"./SAXS_comparison/\"\n", "os.makedirs(SAXS_output, exist_ok=True)\n", "os.chdir(SAXS_output)\n", + "model = IMP.Model()\n", "convert_rmfs(state_dict, model)\n", - "copy_SAXS_dat_files(custom_src_dir='../../../../modeling/Input_Information/gen_SAXS')\n", + "copy_SAXS_dat_files(custom_src_dir='../../modeling/Input_Information/gen_SAXS')\n", "process_foxs(state_dict)\n", "print(\"Step 4a: SAXS validation IS COMPLETED\")\n", "print(\"\")\n", - "print(\"\")" + "print(\"\")\n", + "os.chdir(main_dir)" ] }, { @@ -1238,15 +1410,15 @@ "source": [ "As our model was generated from synthetic data, the ground truth structure is known at each time point. In addition to validating the model by assessing its comparison to SAXS data, we could approximate the model accuracy by comparing the snapshot model to the PDB structure, although this comparison is not perfect as the PDB structure was used to inform the structure of *rigid bodies* in the snapshot model. To do so, we wrote a function (`RMSD`) that calculates the RMSD between each structural model and the orignal PDB. 
The function is too computationally expensive to run in this notebook, but is found in the `Trajectories/Trajectories_Assessment/` folder and is demonstrated below.\n", "\n", - "\\code{.py}\n", - "## 4b - RMSD\n", + "```python\n", + "# 4b - RMSD\n", "os.chdir(main_dir) # it is crucial that after each step, directory is changed back to main\n", "pdb_path = \"../../Input_Information/PDB/3rpg.pdb\"\n", "RMSD(pdb_path=pdb_path, custom_n_plot=20)\n", "print(\"Step 4b: RMSD validation IS COMPLETED\")\n", "print(\"\")\n", "print(\"\")\n", - "\\endcode\n", + "```\n", "\n", "The output of this function is written in `RMSD_calculation_output`. The function outputs `rmsd_{state}_{time}.png` files, which plots the RMSD for each structural model within each snapshot model. This data is then summarized in `RMSD_analysis.txt`, which includes the minimum RMSD, average RMSD, and number of structural models in each snapshot model.\n" ] @@ -1272,7 +1444,7 @@ "id": "98f7a12c-d7e9-49ed-9a16-b713c3b4762a", "metadata": {}, "source": [ - "# Next steps {#notebook_Conclusion}\n", + "# Next steps\n", "\n", "After assessing our model, we can must decide if the model is sufficient to answer biological questions of interest. If the model does not have sufficient precision for the desired application, assessment of the current model can be used to inform which new experiments may help improve the next iteration of the model. The [integrative spatiotemporal modeling procedure](https://integrativemodeling.org/tutorials/spatiotemporal/index.html#steps) can then be repeated iteratively, analogous to [integrative modeling of static structures](https://integrativemodeling.org/2.21.0/doc/manual/intro.html#procedure).\n", "\n", @@ -1296,7 +1468,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.10.15" } }, "nbformat": 4, diff --git a/Jupyter/spatiotemporal.py b/Jupyter/spatiotemporal.py new file mode 100755 index 000000000..8a390a4d6 --- /dev/null +++ b/Jupyter/spatiotemporal.py @@ -0,0 +1,583 @@ +#!/usr/bin/env python3 + + +# General imports for the tutorial +import sys, os, glob, shutil +import IMP +import RMF +import IMP.rmf +from IMP.spatiotemporal import prepare_protein_library +import IMP.spatiotemporal as spatiotemporal +from IMP.spatiotemporal import analysis +import numpy as np +import matplotlib.pyplot as plt + +# parameters for prepare_protein_library: +times = ["0min", "1min", "2min"] +exp_comp = {'A': '../modeling/Input_Information/gen_FCS/exp_compA.csv', + 'B': '../modeling/Input_Information/gen_FCS/exp_compB.csv', + 'C': '../modeling/Input_Information/gen_FCS/exp_compC.csv'} +expected_subcomplexes = ['A', 'B', 'C'] +template_topology = '../modeling/Heterogeneity/Heterogeneity_Modeling/spatiotemporal_topology.txt' +template_dict = {'A': ['Ubi-E2-D3'], 'B': ['BMI-1'], 'C': ['E3-ubi-RING2']} +nmodels = 3 + +# calling prepare_protein_library +prepare_protein_library.prepare_protein_library(times, exp_comp, expected_subcomplexes, nmodels, + template_topology=template_topology, template_dict=template_dict) + +def merge_scores(fileA, fileB, outputFile): + """ + For each function merges scoresA.txt and scoresB.txt into {state}_{time}_scores.log + + :param fileA: path to scoresA.txt + :param fileB: path to scoresB.txt + :param outputFile: path to output merged .log file named {state}_{time}_scores.log for each snapshot. + This type of .log file is used in crete_DAG to generate trajectory model. 
+ """ + # open both files, so data can be extracted + with open(fileA, 'r') as file_a: + data_a = file_a.readlines() + + with open(fileB, 'r') as file_b: + data_b = file_b.readlines() + + # Merge the content of both files + merged_data = data_a + data_b + + # Write the merged content into the output file + with open(outputFile, 'w') as output: + output.writelines(merged_data) + +def create_data_and_copy_files(state_dict, custom_source_dir1 = None, custom_source_dir2 = None, custom_source_dir3 = None): + """ + Copies three types of files important to generate trajectory models: + -.config files created with start_sim.py in Snapshot_Modeling (source_dir1) + -time-dependent stoichiometry data for each timepoint. Data should be presented in .csv file. With this function all + csv file in source_dir2 will be copied. These .csv files will be used in the exp_comp dictionary in create_DAG + function + -scoresA and scoresB for each snapshot created with imp sampcon exhaust + (source_dir1 + snapshot + good_scoring_models) are merged into total score .txt using merge_scores helper function. + All copied files are gathered in newly created './data/' directory, where everything is prepared for create_DAG + function. + + + :param state_dict (dict): state_dict: dictionary that defines the spatiotemporal model. + The keys are strings that correspond to each time point in the + stepwise temporal process. Keys should be ordered according to the + steps in the spatiotemporal process. The values are integers that + correspond to the number of possible states at that timepoint. + :param custom_source_dir1 (optional - str): Custom path to heterogeneity modeling dir (heterogeneity_modeling.py), + to copy .config files + :param custom_source_dir2 (optional - str): Custom path to stoichiometry data dir + :param custom_source_dir3 (optional - str): Custom path to snapshot modeling dir (start_sim.py), to copy .config + files and to access scoresA/scoresB (custom_source_dir3 + snapshot{state}_{time} + 'good_scoring_models') + """ + + # Create the destination directory if it does not exist (./data/). Here all the + destination_dir = './data/' + os.makedirs(destination_dir, exist_ok=True) + + # Path to heterogeneity modeling dir + if custom_source_dir1: + source_dir1 = custom_source_dir1 + else: + source_dir1 = '../../Heterogeneity/Heterogeneity_Modeling/' + + # Path to stoichiometry data dir + if custom_source_dir2: + source_dir2 = custom_source_dir2 + else: + source_dir2 = '../../Input_Information/gen_FCS/' + + # Path to snapshot modeling dir + if custom_source_dir3: + source_dir3 = custom_source_dir3 + else: + source_dir3 = '../../Snapshots/Snapshots_Modeling/' + + # Copy all .config files from the first source directory to the destination directory + try: + for file_name in os.listdir(source_dir1): + if file_name.endswith('.config'): + full_file_name = os.path.join(source_dir1, file_name) + if os.path.isfile(full_file_name): + shutil.copy(full_file_name, destination_dir) + print(".config files are copied") + except Exception as e: + print(f".config files cannot be copied. Try do do it manually. 
Reason for Error: {e}") + + # Copy all .csv stoichiometry files from the second source directory to the destination directory + try: + for file_name in os.listdir(source_dir2): + if file_name.endswith('.csv'): + full_file_name = os.path.join(source_dir2, file_name) + if os.path.isfile(full_file_name): + shutil.copy(full_file_name, destination_dir) + print(".csv stoichiometry files are copied") + except Exception as e: + print(f".csv stoichiometry files cannot be copied. Try do do it manually. Reason for Error: {e}") + + # Copy scoresA and scoresB from the snapshot_{state}_{time} directories and first source directory path + for time in state_dict.keys(): + for state in range(1, state_dict[time] + 1): + dir_name = f"snapshot{state}_{time}" + good_scoring_path = "good_scoring_models" + file_a = os.path.join(source_dir3, dir_name, good_scoring_path, "scoresA.txt") + file_b = os.path.join(source_dir3, dir_name, good_scoring_path, "scoresB.txt") + output_file = os.path.join(destination_dir, f"{state}_{time}_scores.log") # name of the output file + + try: + # Ensure the directory exists before try to read/write files + if os.path.exists(file_a) and os.path.exists(file_b): + merge_scores(file_a, file_b, output_file) # call helper function to merge files + print(f"Scores for snapshot{state}_{time} have been merged and saved") + else: # many things can go wrong here, so it is good to know where is the problem + print(f"Path doesn't exist: {source_dir3}") + print(f"Files not found in directory: {dir_name}") + print(f"Files not found in directory: {file_a}") + print(f"Files not found in directory: {file_b}") + print(f"Output directory doesn't exist: {destination_dir}") + except Exception as e: + print(f"total scores files cannot be copied of merged. Reason for Error: {e}") + +# copy all the relevant files for create_DAG +# it is important that everything starts from main dir +main_dir = os.getcwd() +os.chdir(main_dir) +state_dict = {'0min': 3, '1min': 3, '2min': 1} +create_data_and_copy_files(state_dict, custom_source_dir1=main_dir, custom_source_dir2='../modeling/Input_Information/gen_FCS/', custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/') + +# then trajectory model is created based on the all copied data +expected_subcomplexes = ['A', 'B', 'C'] +exp_comp = {'A': 'exp_compA.csv', 'B': 'exp_compB.csv', 'C': 'exp_compC.csv'} +input = './data/' +output = "../output/" + +nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3, + input_dir=input, scorestr='_scores.log', + output_dir=output, spatio_temporal_rule=True, + expected_subcomplexes=expected_subcomplexes, + score_comp=True, exp_comp_map=exp_comp, + draw_dag=True) + +## 1 - calculation of temporal precision + +# 1 - copy_files_for_data (copy all relevant files into 'data' directory) +def copy_files_for_data(state_dict, custom_source_dir1 = None, custom_source_dir2 = None, custom_source_dir3 = None): + """ + Copies three types of files important to generate trajectory models: + -.config files created with start_sim.py in Snapshot_Modeling (source_dir1) + -time-dependent stoichiometry data for each timepoint. Data should be presented in .csv file. With this function all + csv file in source_dir2 will be copied. These .csv files will be used in the exp_comp dictionary in create_DAG + function + -scoresA and scoresB for each snapshot created with imp sampcon exhaust + (source_dir1 + snapshot + good_scoring_models) are merged into total score .txt using merge_scores helper function. 
+
+## 1 - calculation of temporal precision
+
+# 1 - copy_files_for_data (copy all relevant files into the 'data' directory)
+def copy_files_for_data(state_dict, custom_source_dir1=None, custom_source_dir2=None, custom_source_dir3=None):
+    """
+    Copies three types of files needed to generate trajectory models:
+    -.config files created during heterogeneity modeling (source_dir1)
+    -time-dependent stoichiometry data for each time point. Data should be presented as .csv files. All
+    .csv files in source_dir2 will be copied. These .csv files are used in the exp_comp dictionary of the create_DAG
+    function
+    -scoresA and scoresB for each snapshot created with imp sampcon exhaust
+    (source_dir3 + snapshot{state}_{time} + good_scoring_models) are copied separately and renamed to
+    {state}_{time}_scoresA.log and {state}_{time}_scoresB.log, so that two independent trajectory models can be built.
+    All copied files are gathered in the newly created './data/' directory, where everything is prepared for the
+    create_DAG function.
+
+
+    :param state_dict (dict): dictionary that defines the spatiotemporal model.
+        The keys are strings that correspond to each time point in the
+        stepwise temporal process. Keys should be ordered according to the
+        steps in the spatiotemporal process. The values are integers that
+        correspond to the number of possible states at that timepoint.
+    :param custom_source_dir1 (optional - str): Custom path to the heterogeneity modeling dir (heterogeneity_modeling.py),
+        to copy .config files
+    :param custom_source_dir2 (optional - str): Custom path to the stoichiometry data dir
+    :param custom_source_dir3 (optional - str): Custom path to the snapshot modeling dir (start_sim.py), to access
+        scoresA/scoresB (custom_source_dir3 + snapshot{state}_{time} + 'good_scoring_models')
+    """
+    # Create the destination directory for all the data copied in this function
+    destination_dir = './data/'
+    os.makedirs(destination_dir, exist_ok=True)
+
+    # path to heterogeneity modeling dir
+    if custom_source_dir1:
+        source_dir1 = custom_source_dir1
+    else:
+        source_dir1 = '../../Heterogeneity/Heterogeneity_Modeling/'
+
+    # path to stoichiometry data dir
+    if custom_source_dir2:
+        source_dir2 = custom_source_dir2
+    else:
+        source_dir2 = '../../Input_Information/gen_FCS/'
+
+    # path to snapshot modeling dir
+    if custom_source_dir3:
+        source_dir3 = custom_source_dir3
+    else:
+        source_dir3 = '../../Snapshots/Snapshots_Modeling/'
+
+    # Copy all .config files from the first source directory to the destination directory
+    try:
+        for file_name in os.listdir(source_dir1):
+            if file_name.endswith('.config'):
+                full_file_name = os.path.join(source_dir1, file_name)
+                if os.path.isfile(full_file_name):
+                    shutil.copy(full_file_name, destination_dir)
+        print(".config files are copied")
+    except Exception as e:
+        print(f".config files cannot be copied. Try to do it manually. Reason for Error: {e}")
+
+    # Copy all .csv stoichiometry files from the second source directory to the destination directory
+    try:
+        for file_name in os.listdir(source_dir2):
+            if file_name.endswith('.csv'):
+                full_file_name = os.path.join(source_dir2, file_name)
+                if os.path.isfile(full_file_name):
+                    shutil.copy(full_file_name, destination_dir)
+        print(".csv stoichiometry files are copied")
+    except Exception as e:
+        print(f".csv stoichiometry files cannot be copied. Try to do it manually. Reason for Error: {e}")
+
+    # Copy scoresA and scoresB from the snapshot{state}_{time} directories under the third source directory
+    try:
+        for time in state_dict.keys():
+            for state in range(1, state_dict[time] + 1):
+                snapshot_dir = os.path.join(source_dir3, f'snapshot{state}_{time}')
+                good_scoring_models_dir = os.path.join(snapshot_dir, 'good_scoring_models')
+                if os.path.isdir(good_scoring_models_dir):
+                    for score_file in ['scoresA.txt', 'scoresB.txt']:
+                        full_file_name = os.path.join(good_scoring_models_dir, score_file)
+                        if os.path.isfile(full_file_name):
+                            new_file_name = f'{state}_{time}_{os.path.splitext(score_file)[0]}.log'
+                            shutil.copy(full_file_name, os.path.join(destination_dir, new_file_name))
+                            print(f"Copied {full_file_name} to {os.path.join(destination_dir, new_file_name)}")
+    except Exception as e:
+        print(f"scoresA.txt and scoresB.txt cannot be copied. Try to do it manually. Reason for Error: {e}")
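+
+# Note on the naming convention above: copy_files_for_data() renames scoresA.txt and scoresB.txt to
+# {state}_{time}_scoresA.log and {state}_{time}_scoresB.log. The scorestr='_scoresA.log' and
+# scorestr='_scoresB.log' arguments of the two create_DAG calls below rely on this convention to
+# build two independent trajectory models from the two halves of the scores.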
+
+os.chdir(main_dir)
+# copy all the relevant files
+copy_files_for_data(state_dict, custom_source_dir1='../modeling/Heterogeneity/Heterogeneity_Modeling/',
+                    custom_source_dir2='../modeling/Input_Information/gen_FCS/',
+                    custom_source_dir3='../modeling/Snapshots/Snapshots_Modeling/')
+
+# create two independent DAGs
+expected_subcomplexes = ['A', 'B', 'C']
+exp_comp = {'A': 'exp_compA.csv', 'B': 'exp_compB.csv', 'C': 'exp_compC.csv'}
+input = "./data/"
+outputA = "../output_modelA/"
+outputB = "../output_modelB/"
+
+# Output from sampling precision and model precision is saved in a single dir: analysis_output_precision
+analysis_output = "./analysis_output_precision/"
+os.makedirs(analysis_output, exist_ok=True)
+
+nodesA, graphA, graph_probA, graph_scoresA = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,
+                                                                       input_dir=input, scorestr='_scoresA.log',
+                                                                       output_dir=outputA,
+                                                                       spatio_temporal_rule=True,
+                                                                       expected_subcomplexes=expected_subcomplexes,
+                                                                       score_comp=True, exp_comp_map=exp_comp,
+                                                                       draw_dag=False)
+
+os.chdir(main_dir)
+nodesB, graphB, graph_probB, graph_scoresB = spatiotemporal.create_DAG(state_dict, out_pdf=True, npaths=3,
+                                                                       input_dir=input, scorestr='_scoresB.log',
+                                                                       output_dir=outputB,
+                                                                       spatio_temporal_rule=True,
+                                                                       expected_subcomplexes=expected_subcomplexes,
+                                                                       score_comp=True, exp_comp_map=exp_comp,
+                                                                       draw_dag=False)
+
+## 1 - analysis
+analysis.temporal_precision(outputA + 'labeled_pdf.txt', outputB + 'labeled_pdf.txt',
+                            output_fn='.' + analysis_output + 'temporal_precision.txt')
+os.chdir(main_dir)  # it is crucial that after each step, the directory is changed back to main
+print("Step 1: calculation of temporal precision IS COMPLETED")
+print("")
+print("")
+
+## 2 - calculation of precision of the model
+
+# precision is calculated from the labeled_pdf.txt written by the trajectory model above
+trajectories_modeling_input_dir = "./output/"
+
+analysis.precision(trajectories_modeling_input_dir + 'labeled_pdf.txt', output_fn=analysis_output + 'precision.txt')
+
+os.chdir(main_dir)  # it is crucial that after each step, the directory is changed back to main
+print("Step 2: calculation of precision of the model IS COMPLETED")
+print("")
+print("")
+
+def read_labeled_pdf(pdf_file):
+    """
+    Function to read in a labeled probability distribution file output by spatiotemporal.create_DAG.
+    Used to determine protein copy numbers by forward_model_copy_number.
+    :param pdf_file (str): string for the path of the labeled probability distribution file output by
+        spatiotemporal.create_DAG.
+    :return prob_dict (dict): dictionary defining the spatiotemporal model. Each key is a trajectory (path through
+        the states), and each value is the probability of that trajectory.
+    """
+    # create a blank dictionary to store the results
+    prob_dict = {}
+    # read in the labeled pdf file
+    old = open(pdf_file, 'r')
+    line = old.readline()
+    # store the path through the various nodes, as well as the probability of that path
+    while line:
+        line_split = line.split()
+        # assumes the first string is the trajectory string, the second string is the probability
+        if len(line_split) > 1:
+            # use # for comments
+            if line_split[0] == '#':
+                pass
+            else:
+                trj = line_split[0]
+                prob = float(line_split[1])
+                # store in dictionary
+                prob_dict[trj] = prob
+        line = old.readline()
+    old.close()
+    return prob_dict
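+
+# A minimal illustration of the file format read_labeled_pdf() assumes (inferred from the parsing
+# above, not a specification): each non-comment line holds a '|'-separated trajectory string
+# followed by its probability, e.g.
+#   1_0min|1_1min|1_2min| 0.60
+# and lines starting with '#' are skipped as comments.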
+
+def copy_number_from_state(prot_list, trj, custom_data_folder=None):
+    """
+    For a trajectory, returns an array of protein copy numbers as a function of time. Used by
+    forward_model_copy_number().
+    :param prot_list (list): list of proteins in the model. These proteins are searched for in each config file.
+    :param trj (str): string defining a single trajectory.
+    :param custom_data_folder (str, optional): path to a custom data folder. Defaults to None, which points to 'data/'
+    :return _prots (array): 2D array of protein copy numbers. The first index loops over the time points,
+        while the second index loops over the proteins (ordered as in prot_list).
+    :return N (int): Number of time points in each trajectory.
+    """
+    # find the folder with the config files
+    if custom_data_folder:
+        data_folder = custom_data_folder
+    else:
+        data_folder = 'data/'
+
+    # split the trajectory into a list of individual states
+    state_list = trj.split('|')
+    state_list = state_list[:-1]
+
+    N = len(state_list)
+    # Map from index to protein: 0 - A, 1 - B, 2 - C
+    _prots = np.zeros((N, len(prot_list)))
+
+    # Grab _prots from the .config file of each state
+    for i in range(0, N):
+        prot_file = data_folder + state_list[i] + '.config'
+        to_read = open(prot_file, 'r')
+        line = to_read.readline()
+        while line:
+            # for each line, check if the protein is in that line
+            for prot_index in range(len(prot_list)):
+                if prot_list[prot_index] in line:
+                    _prots[i, prot_index] += 1
+            line = to_read.readline()
+        to_read.close()
+
+    return _prots, N
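+
+# A note on the .config format assumed above (inferred from the counting loop in
+# copy_number_from_state, not a specification): each line of a snapshot's .config file names one
+# protein copy, so a protein that appears on two lines contributes a copy number of 2 at that
+# time point.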
+
+def forward_model_copy_number(prot_list, custom_labeled_pdf=None):
+    """
+    Code to perform copy number analysis on each protein in the model. Writes output files where each row is ordered
+    according to the time point in the model; the first column is the mean copy number, while the second column is
+    the standard deviation in copy number.
+    :param prot_list (list): list of proteins in the model. These proteins are searched for in each config file.
+    :param custom_labeled_pdf (str, optional): path to a custom labeled probability distribution file output by
+        spatiotemporal.create_DAG.
+    """
+    # find the labeled probability distribution file
+    if custom_labeled_pdf:
+        _labeled_pdf = custom_labeled_pdf
+    else:
+        _labeled_pdf = '../Trajectories_Modeling/output/labeled_pdf.txt'
+
+    # Read the labeled_pdf file into a dictionary, with each trajectory as a key and
+    # the probability of that trajectory as the value
+    prob_dict = read_labeled_pdf(_labeled_pdf)
+
+    # Loop over the full dictionary. Create a list with 2 values:
+    # 1) the probability of the trajectory, 2) the protein copy numbers of that trajectory.
+    key_list = prob_dict.keys()
+    prot_prob = []
+    for key in key_list:
+        CN, N_times = copy_number_from_state(prot_list, key)
+        prot_prob.append([prob_dict[key], CN])
+
+    # Construct the full path to the output directory
+    dir_name = "forward_model_copy_number"
+    full_path = os.path.join(main_dir, dir_name)
+    os.makedirs(full_path, exist_ok=True)
+    os.chdir(full_path)
+
+    # Determine the copy number from prot_prob
+    for index in range(len(prot_prob[0][1][0])):
+        copy_number = np.zeros((N_times, 2))
+        # calculate the mean
+        for state in prot_prob:
+            for i in range(N_times):
+                copy_number[i, 0] += state[0] * state[1][i][index]
+        # calculate the std deviation
+        for state in prot_prob:
+            for i in range(N_times):
+                # Calculate the variance
+                copy_number[i, 1] += state[0] * ((state[1][i][index] - copy_number[i, 0]) ** 2)
+        # Take the square root to get the standard deviation
+        copy_number[:, 1] = np.sqrt(copy_number[:, 1])
+        # save to file
+        np.savetxt('CN_prot_' + prot_list[index] + '.txt', copy_number, header='mean CN\tstd CN')
+
+# 3b - comparison of the model to data used in modeling (copy number)
+os.chdir(main_dir)  # it is crucial that after each step, the directory is changed back to main
+forward_model_copy_number(expected_subcomplexes, custom_labeled_pdf='output/labeled_pdf.txt')
+print("Step 3b: copy number validation IS COMPLETED")
+print("")
+print("")
+
+# 4a - SAXS
+"""
+Comparing the center models of the most dominant cluster for each snapshot (rmfs) to the SAXS data for each time
+point can be done in two steps:
+-converting rmfs to pdb files
+-comparing the pdbs of each snapshot to the experimental SAXS profile using FoXS
+"""
+
+def convert_rmfs(state_dict, model, custom_path=None):
+    """
+    The purpose of this function is to automate the conversion of RMF files into PDB files for all the states from
+    state_dict. The created PDBs are further used in the comparison of SAXS profiles using FoXS. Additionally, they
+    can be used for comparison to the native PDB, if available.
+
+    :param state_dict (dict): dictionary that defines the spatiotemporal model.
+        The keys are strings that correspond to each time point in the
+        stepwise temporal process. Keys should be ordered according to the
+        steps in the spatiotemporal process. The values are integers that
+        correspond to the number of possible states at that timepoint.
+    :param model (IMP.Model): An IMP (Integrative Modeling Platform) model object.
+    :param custom_path (optional - str): A custom path for the RMF file, allowing for flexibility in file location
+        (should be compliant with state_dict).
+    """
+
+    for time in state_dict.keys():
+        for state in range(1, state_dict[time] + 1):
+            if custom_path:
+                sim_rmf = custom_path  # option for custom path
+            else:
+                sim_rmf = f"../../modeling/Snapshots/Snapshots_Assessment/exhaust_{state}_{time}/cluster.0/cluster_center_model.rmf3"
+
+            pdb_output = f"snapshot{state}_{time}.pdb"  # name of the converted .pdb file
+
+            if os.path.exists(sim_rmf):
+                try:
+                    rmf_fh = RMF.open_rmf_file_read_only(sim_rmf)  # open rmf file for reading
+                    rmf_hierarchy = IMP.rmf.create_hierarchies(rmf_fh, model)[0]  # extract 1st hierarchy
+                    IMP.atom.write_pdb_of_c_alphas(rmf_hierarchy, pdb_output)  # write coordinates of CA to .pdb
+                    print(f"Finishing: snapshot{state}_{time}.pdb")
+                except Exception as e:
+                    print(f"{sim_rmf} is empty or there is another problem: {e}")
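+
+# Note: write_pdb_of_c_alphas() above writes only the C-alpha coordinates of each cluster-center
+# model, so the FoXS comparison below is performed on these coarse (C-alpha only) representations
+# of the snapshots.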
+
+
+def copy_SAXS_dat_files(custom_src_dir=None):
+    """
+    Copies all files ending with .dat from the specified directory to the current directory.
+
+    :param custom_src_dir (optional - str): Path to the source directory
+    """
+    if custom_src_dir:
+        src_dir = custom_src_dir
+    else:
+        src_dir = '../../../Input_Information/gen_SAXS'
+    try:
+        files = os.listdir(src_dir)  # Get the list of all files in the src_dir directory
+        dat_files = [f for f in files if f.endswith('.dat')]  # Filter out files that end with .dat
+
+        # Copy each .dat file to the current directory, so FoXS can be used
+        for file_name in dat_files:
+            full_file_name = os.path.join(src_dir, file_name)
+            if os.path.isfile(full_file_name):
+                shutil.copy(full_file_name, os.getcwd())
+                # print(f"Copied: {full_file_name} to {main_dir}")
+
+        print("All .dat files have been copied successfully...")
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+
+
+def process_foxs(state_dict, custom_dat_file=None):
+    """
+    This function automates the FoXS analysis for all specified time points in a single execution. It processes the
+    PDB files generated by the convert_rmfs function and uses the SAXS data copied with the copy_SAXS_dat_files
+    function. All of this data should be present in the current working directory.
+    A FoXS tutorial is available here: https://integrativemodeling.org/tutorials/foxs/foxs.html
+
+    :param state_dict (dict): dictionary that defines the spatiotemporal model.
+        The keys are strings that correspond to each time point in the
+        stepwise temporal process. Keys should be ordered according to the
+        steps in the spatiotemporal process. The values are integers that
+        correspond to the number of possible states at that timepoint.
+    :param custom_dat_file (optional - str): A custom name of the SAXS file for each time point (should be
+        compliant with state_dict)
+    """
+
+    print("...let's proceed to FoXS")
+
+    for time in state_dict.keys():
+        try:
+            if state_dict[time] > 1:
+                # if there is more than one state at this time point, FoXS creates fit.plt, which should be renamed
+                if custom_dat_file:
+                    dat_file = custom_dat_file
+                else:
+                    dat_file = f"{time}_exp.dat"
+
+                pdb_files = " ".join([f"snapshot{state}_{time}.pdb" for state in range(1, state_dict[time] + 1)])
+
+                command1 = f"foxs -r -g {pdb_files} {dat_file}"
+                # example of how the FoXS command should look: foxs -r -g snapshot1_0min.pdb snapshot2_0min.pdb snapshot3_0min.pdb 0min_exp.dat
+                os.system(command1)
+                print(f"FoXS for {time} is calculated and ready to create a plot. Number of states is: {state_dict[time]}")
+
+                command2 = "gnuplot fit.plt"  # create the plot from the .plt script
+                os.system(command2)
+
+                command3 = f"mv fit.plt {time}_FoXS.plt"  # rename the .plt script so it is not overwritten
+                os.system(command3)
+
+                command4 = f"mv fit.png {time}_FoXS.png"  # rename the plot so it is not overwritten
+                os.system(command4)
+
+                print(f"Plot {time}_FoXS.png is created")
+
+            elif state_dict[time] == 1:
+                print(f"There is only one state in {time}")
+                dat_file1 = f"{time}_exp.dat"
+                pdb_file1 = f"snapshot1_{time}.pdb"
+
+                command5 = f"foxs -r -g {pdb_file1} {dat_file1}"
+                os.system(command5)
+                print(f"FoXS for {time} is calculated and ready to create a plot. Number of states is: {state_dict[time]}")
+
+                command6 = f"gnuplot snapshot1_{time}_{time}_exp.plt"
+                os.system(command6)
+
+                command7 = f"mv snapshot1_{time}_{time}_exp.plt {time}_FoXS.plt"
+                os.system(command7)
+
+                command8 = f"mv snapshot1_{time}_{time}_exp.png {time}_FoXS.png"
+                os.system(command8)
+            else:
+                print("There are no states at this time point. Check state_dict.")
+
+        except Exception as e:
+            print(f"FoXS cannot be executed properly due to the following problem: {e}")
+
+
+# 4a - SAXS
+os.chdir(main_dir)  # it is crucial that after each step, the directory is changed back to main
+SAXS_output = "./SAXS_comparison/"
+os.makedirs(SAXS_output, exist_ok=True)
+os.chdir(SAXS_output)
+model = IMP.Model()
+convert_rmfs(state_dict, model)
+copy_SAXS_dat_files(custom_src_dir='../../modeling/Input_Information/gen_SAXS')
+process_foxs(state_dict)
+print("Step 4a: SAXS validation IS COMPLETED")
+print("")
+print("")
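+
+# Optional follow-up (a minimal sketch, not part of the workflow above): plot the copy-number
+# files written by forward_model_copy_number() in step 3b. This assumes matplotlib is available
+# and that the CN_prot_<protein>.txt files follow the two-column layout (mean CN, std CN)
+# written by np.savetxt above.
+import matplotlib.pyplot as plt
+
+os.chdir(main_dir)
+cn_dir = os.path.join(main_dir, "forward_model_copy_number")
+times = list(state_dict.keys())
+for prot in expected_subcomplexes:
+    cn_file = os.path.join(cn_dir, f"CN_prot_{prot}.txt")
+    if os.path.isfile(cn_file):
+        cn = np.loadtxt(cn_file)  # column 0: mean copy number, column 1: std deviation
+        plt.errorbar(range(len(times)), cn[:, 0], yerr=cn[:, 1], label=prot)
+plt.xticks(range(len(times)), times)
+plt.xlabel("time")
+plt.ylabel("mean copy number")
+plt.legend()
+plt.savefig(os.path.join(cn_dir, "copy_number_vs_time.png"))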