From beef1be3c6b25a3b2539a1cbc8dac48977f3ad1b Mon Sep 17 00:00:00 2001
From: Harshal Pohekar <106588300+hpohekar@users.noreply.github.com>
Date: Fri, 24 May 2024 19:42:17 +0530
Subject: [PATCH] feat: Enhanced search (#2816)

* feat: Enhanced search
* test update
* working
* update signature and add warnings
* improvements
* add tests
* update doc
* resolve conflicts
* resolve conflicts 1
* doc fix 1
* update signature
* update test
* update test and search
* remove results
* update doc and fix tests
* update __init__
* update doc 2
* update exception
* change order of arguments
* _write_api_tree_file
* update _process_wildcards
* remove extra set
* remove file in spellchecker
* use rstrip
* update condition
* update condition and warning
* update nltk.download
* sys version
* update __init__ and test_search
* rstrip fix
* rstrip fix 1
* rstrip fix 2
* add else block
* update allapigen.py
* add script execution pyproject.toml
* restructuring 1
* remove flag
* update allapigen.py
* update names, signature, _process_misspelled
* download nltk_data, handle internet error
* update env var
* update nltk.data.path
* separate functions
* restructuring and test fixes
* generate fix
* improve handling
* improve handling and add more tests
* add more tests 1
* add 242 marker
* remove wildcard flag
* Revert "remove wildcard flag"

  This reverts commit cb097755511d8f7016e84b293bbad24ff70bc1c4.

* add files to package
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
* doc refactor 1
* add test for nltk download
* update interface and remove scikit-learn
* update interface warnings
* remove exception test
* integrate search_root
* update tests
* update tests 1
* update tests 2
* updat return type
* add tests for exact and match case
* update capitalize doc
* update logic and names
* update match_case condition
* update match_case condition
* Update src/ansys/fluent/core/utils/search.py
  Co-authored-by: Sean Pearson <93727996+seanpearsonuk@users.noreply.github.com>
* update _get_wildcard..
* update search doc match_case
* single json file
* download nltk
* update download_nltk
* update allapigen.py
* update allapigen.py 1
* error fix
* error fix 1
* error fix 2
* error fix 3
* error fix 4
* error fix 5
* error fix 6

---------

Co-authored-by: Kathy Pippert <84872299+PipKat@users.noreply.github.com>
Co-authored-by: Sean Pearson <93727996+seanpearsonuk@users.noreply.github.com>
---
 codegen/allapigen.py                  |   2 +
 pyproject.toml                        |   2 +
 src/ansys/fluent/core/utils/search.py | 514 +++++++++++++++++++++++++-
 tests/test_search.py                  | 377 +++++++++++++++++--
 4 files changed, 864 insertions(+), 31 deletions(-)

diff --git a/codegen/allapigen.py b/codegen/allapigen.py
index 7c4ebc2846f..6a2e0dd4a7e 100644
--- a/codegen/allapigen.py
+++ b/codegen/allapigen.py
@@ -5,6 +5,7 @@
 from ansys.fluent.core import CODEGEN_OUTDIR, FluentMode, FluentVersion, launch_fluent
 from ansys.fluent.core.codegen import StaticInfoType, allapigen, print_fluent_version
 from ansys.fluent.core.utils.fluent_version import get_version_for_file_name
+from ansys.fluent.core.utils.search import _search

 if __name__ == "__main__":
     t0 = time()
@@ -59,3 +60,4 @@
     allapigen.generate(version, static_infos)
     t2 = time()
     print(f"Time to generate APIs: {t2 - t1:.2f} seconds")
+    _search("", version=version)

diff --git a/pyproject.toml b/pyproject.toml
index 32ee0ebf33d..d001b7ea2d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,7 @@ include = [
   { path = "src/ansys/fluent/core/generated/solver/settings_*/*.py", format = ["sdist", "wheel"] },
   { path = "src/ansys/fluent/core/generated/solver/settings_*/*.pyi", format = ["sdist", "wheel"] },
   { path = "src/ansys/fluent/core/generated/*.pickle", format = ["sdist", "wheel"] },
+  { path = "src/ansys/fluent/core/generated/api_tree/*.json", format = ["sdist", "wheel"] },
 ]

 packages = [
@@ -49,6 +50,7 @@ grpcio = "^1.30.0"
 grpcio-health-checking = "^1.30.0"
 h5py = { version = "==3.11.0", optional = true }
 lxml = "^4.9.2"
+nltk = "^3.8.1"
 numpy= "^1.21.5"
 pandas = "^2.0.3"
 platformdirs = "^3.5.1"
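The new ``nltk`` dependency pulls in WordNet, which backs the semantic matching added to ``search.py`` below. A minimal sketch of the corpus lookups involved (assumes the ``wordnet`` and ``omw-1.4`` corpora can be downloaded; the printed synsets are examples, not guaranteed output):

    import nltk
    from nltk.corpus import wordnet as wn

    # One-time corpus download; search.py does this on demand via _download_nltk_data().
    for package in ["wordnet", "omw-1.4"]:
        nltk.download(package, quiet=True)

    # Synsets group words by meaning; their base names seed the semantic index.
    print([s.name() for s in wn.synsets("font", lang="eng")])
    # e.g. ['font.n.01', 'baptismal_font.n.01']
    print([s.name().split(".")[0] for s in wn.synsets("读", lang="cmn")])
    # the Chinese query maps to English-named synsets such as 'read'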
/ "generated").resolve() + text_file_folder = Path(os.path.join(codegen_outdir, "api_tree")) + return (text_file_folder / "api_objects.json").resolve() + + def get_api_tree_file_name(version: str) -> Path: """Get API tree file name.""" from ansys.fluent.core import CODEGEN_OUTDIR @@ -126,7 +142,7 @@ def _get_version_path_prefix_from_obj(obj: Any): return version, path, prefix -def search( +def _search( word: str, match_whole_word: bool = False, match_case: bool = False, @@ -138,18 +154,18 @@ def search( Parameters ---------- word : str - The word to search for. + Word to search for. match_whole_word : bool, optional Whether to match whole word, by default False match_case : bool, optional Whether to match case, by default False version : str, optional - Fluent version to search in, by default None in which case - it will search in the latest version for which codegen was run. + Fluent version to search in. The default is ``None``. If ``None``, + it searches in the latest version for which codegen was run. search_root : Any, optional - The root object within which the search will be performed, - can be a session object or any API object within a session, - by default None in which case it will search everything. + The root object within which the search is performed. + It can be a session object or any API object within a session. + The default is ``None``. If ``None``, it searches everything. Examples -------- @@ -170,6 +186,9 @@ def search( .results.graphics.mesh[""].geometry (Parameter) .results.graphics.contour[""].geometry (Parameter) """ + api_objects = [] + api_tui_objects = [] + api_object_names = set() if version: version = get_version_for_file_name(version) root_version, root_path, prefix = _get_version_path_prefix_from_obj(search_root) @@ -224,10 +243,489 @@ def inner(tree, path, root_path): next_path = f'{path}.{k}[""]' else: next_path = f"{path}.{k}" + type_ = "Object" if isinstance(v, Mapping) else v + api_object_names.add(k) + if "tui" in next_path: + api_tui_objects.append(f"{next_path} ({type_})") + else: + api_objects.append(f"{next_path} ({type_})") if _match(k, word, match_whole_word, match_case): - type_ = "Object" if isinstance(v, Mapping) else v print(f"{next_path} ({type_})") if isinstance(v, Mapping): inner(v, next_path, root_path) inner(api_tree, "", root_path) + + api_tree_data = dict() + api_tree_data["api_objects"] = sorted(api_objects) + api_tree_data["api_tui_objects"] = sorted(api_tui_objects) + + def _write_api_tree_file(api_tree_data: dict, api_object_names: list): + _download_nltk_data() + from ansys.fluent.core import CODEGEN_OUTDIR + + json_file_folder = Path(os.path.join(CODEGEN_OUTDIR, "api_tree")) + json_file_folder.mkdir(parents=True, exist_ok=True) + + all_api_object_name_synsets = dict() + for name in api_object_names: + api_object_name_synsets = ( + wn.synsets(name.decode("utf-8"), lang="eng") + if sys.version_info[0] < 3 + else wn.synsets(name, lang="eng") + ) + synset_names = [] + for api_object_name_synset in api_object_name_synsets: + synset_names.append(api_object_name_synset.name().split(".")[0]) + all_api_object_name_synsets[name] = synset_names + api_tree_data["all_api_object_name_synsets"] = all_api_object_name_synsets + + api_tree_file = _get_api_tree_data_file() + api_tree_file.touch() + with open(api_tree_file, "w") as json_file: + json.dump(api_tree_data, json_file) + + _write_api_tree_file( + api_tree_data=api_tree_data, api_object_names=list(api_object_names) + ) + + +def _get_api_tree_data(): + """Get API tree data.""" + 
+
+
+def _get_api_tree_data():
+    """Get API tree data."""
+    api_tree_data_file = _get_api_tree_data_file()
+    if api_tree_data_file.exists():
+        with open(api_tree_data_file, "r") as json_file:
+            api_tree_data = json.load(json_file)
+        return api_tree_data
+
+
+api_tree_data = _get_api_tree_data()
+
+try:
+    api_object_names = list(api_tree_data["all_api_object_name_synsets"].keys())
+except TypeError:
+    api_object_names = []
+
+
+def _print_search_results(queries: list, api_tree_data: dict):
+    """Print search results.
+
+    Parameters
+    ----------
+    queries: list
+        List of search strings used to match API object names.
+    api_tree_data: dict
+        All API object data.
+    """
+    api_tree_datas = [api_tree_data["api_objects"], api_tree_data["api_tui_objects"]]
+    for api_tree_data in api_tree_datas:
+        for query in queries:
+            for api_object in api_tree_data:
+                if query in api_object:
+                    print(api_object)
+
+
+def _get_wildcard_matches_for_word_from_names(word: str, names: list):
+    """Get wildcard matches for the given word.
+
+    Parameters
+    ----------
+    word: str
+        Wildcard pattern to search for.
+    names: list
+        All API object names.
+
+    Returns
+    -------
+    wildcard_matches: list
+        Matched API object names.
+    """
+    pattern = fnmatch.translate(word)
+    regex = re.compile(pattern)
+    return [name for name in names if regex.match(name)]
+
+
+def _search_wildcard(search_string: str, api_object_names: list):
+    """Perform a wildcard search for a word through Fluent's object hierarchy.
+
+    Parameters
+    ----------
+    search_string: str
+        Wildcard pattern to search for.
+    api_object_names: list
+        All API object names.
+    """
+    queries = _get_wildcard_matches_for_word_from_names(search_string, api_object_names)
+    if queries:
+        _print_search_results(queries, api_tree_data=api_tree_data)
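# A minimal, self-contained sketch of the wildcard matching above:
# fnmatch.translate converts a shell-style pattern into a regular expression,
# which is all the matcher does.
import fnmatch
import re

pattern = fnmatch.translate("iter*")  # e.g. '(?s:iter.*)\\Z' on recent Python
regex = re.compile(pattern)
names = ["iterating", "iter_count", "niter"]
print([name for name in names if regex.match(name)])  # ['iterating', 'iter_count']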
+ """ + close_matches = closest_allowed_names(word, names) + valid_close_matches = [ + close_match for close_match in close_matches if close_match in names + ] + return valid_close_matches + + +def _search_whole_word( + search_string: str, + match_case: bool = False, + match_whole_word: bool = False, + api_object_names: list = None, +): + """Perform exact search for a word through the Fluent's object hierarchy. + + Parameters + ---------- + search_string: str + Word to search for. Semantic search is default. + match_case: bool + Whether to match case. The default is ``False``. + If ``True``, it matches the given word. + match_whole_word: bool + Whether to match whole word. The default is ``False``. + If ``True``, it matches the given word, and it's capitalize case. + api_object_names: list + All API object names. + + Returns + ------- + List of search string matches. + """ + queries = [] + if match_case and match_whole_word: + queries.extend( + _get_exact_match_for_word_from_names(search_string, api_object_names) + ) + elif match_case: + queries.extend( + _get_match_case_for_word_from_names(search_string, api_object_names) + ) + elif match_whole_word: + for word in [search_string, search_string.capitalize()]: + queries.extend(_get_exact_match_for_word_from_names(word, api_object_names)) + elif not match_case and not match_whole_word: + queries.extend( + _get_capitalize_match_for_word_from_names(search_string, api_object_names) + ) + queries.extend( + _get_match_case_for_word_from_names(search_string, api_object_names) + ) + if queries: + _print_search_results(queries, api_tree_data=api_tree_data) + + +def _download_nltk_data(): + """Download NLTK data on demand.""" + import nltk + + packages = ["wordnet", "omw-1.4"] + for package in packages: + nltk.download( + package, + quiet=True, + raise_on_error=True, + ) + + +def _search_semantic(search_string: str, language: str, api_tree_data: dict): + """Perform semantic search for a word through the Fluent's object hierarchy. + + Parameters + ---------- + search_string: str + Word to search for. Semantic search is the default. + language: str + ISO 639-3 code for the language to use for the semantic search. + The default is ``eng`` for English. For the list of supported languages, + see `OMW Version 1 `_. + api_tree_data: dict + All API object data. + + Returns + ------- + queries: list + List of search string matches. 
+ """ + similar_keys = set() + search_string_synsets = ( + wn.synsets(search_string.decode("utf-8"), lang=language) + if sys.version_info[0] < 3 + else wn.synsets(search_string, lang=language) + ) + for api_object_name, api_object_synset_names in list( + api_tree_data["all_api_object_name_synsets"].items() + ): + for search_string_synset in search_string_synsets: + for api_object_synset_name in api_object_synset_names: + search_string_synset_name = search_string_synset.name().split(".")[0] + if ( + search_string in api_object_synset_name + or search_string_synset_name in api_object_synset_name + ): + similar_keys.add(api_object_synset_name + "*") + if similar_keys: + for key in similar_keys: + _search_wildcard( + key, + api_object_names=list( + api_tree_data["all_api_object_name_synsets"].keys() + ), + ) + else: + queries = _get_close_matches_for_word_from_names( + search_string, api_object_names + ) + if queries: + _print_search_results(queries, api_tree_data=api_tree_data) + + +def search( + search_string: str, + language: Optional[str] = "eng", + wildcard: Optional[bool] = False, + match_whole_word: Optional[bool] = False, + match_case: Optional[bool] = True, + search_root: Optional[Any] = None, +): + """Search for a word through the Fluent's object hierarchy. + + Parameters + ---------- + search_string: str + Word to search for. Semantic search is the default. + language: str + ISO 639-3 code for the language to use for the semantic search. + The default is ``eng`` for English. For the list of supported languages, + see `OMW Version 1 `_. + wildcard: bool, optional + Whether to use the wildcard pattern. The default is ``False``. If ``True``, the + wildcard pattern is based on the ``fnmatch`` module and semantic matching + is turned off. + match_whole_word: bool, optional + Whether to find only exact matches. The default is ``False``. If ``True``, + only exact matches are found and semantic matching is turned off. + match_case: bool, optional + Whether to match case. The default is ``True``. If ``False``, the search is case-insensitive. + search_root : Any, optional + The root object within which the search is performed, + can be a session object or any API object within a session. + The default is ``None``. If ``None``, it searches everything. 
+
+
+def search(
+    search_string: str,
+    language: Optional[str] = "eng",
+    wildcard: Optional[bool] = False,
+    match_whole_word: Optional[bool] = False,
+    match_case: Optional[bool] = True,
+    search_root: Optional[Any] = None,
+):
+    """Search for a word through Fluent's object hierarchy.
+
+    Parameters
+    ----------
+    search_string: str
+        Word to search for. Semantic search is the default.
+    language: str
+        ISO 639-3 code for the language to use for the semantic search.
+        The default is ``eng`` for English. For the list of supported languages,
+        see `OMW Version 1 `_.
+    wildcard: bool, optional
+        Whether to use the wildcard pattern. The default is ``False``. If ``True``, the
+        wildcard pattern is based on the ``fnmatch`` module and semantic matching
+        is turned off.
+    match_whole_word: bool, optional
+        Whether to find only exact matches. The default is ``False``. If ``True``,
+        only exact matches are found and semantic matching is turned off.
+    match_case: bool, optional
+        Whether to match case. The default is ``True``. If ``False``, the search
+        is case-insensitive.
+    search_root : Any, optional
+        The root object within which the search is performed.
+        It can be a session object or any API object within a session.
+        The default is ``None``. If ``None``, it searches everything.
+
+    Examples
+    --------
+    >>> import ansys.fluent.core as pyfluent
+    >>> pyfluent.search("font", match_whole_word=True)
+    >>> pyfluent.search("Font")
+    >>> pyfluent.search("iter*", wildcard=True)
+    >>> pyfluent.search("读", language="cmn")  # search 'read' in Chinese
+    The most similar API objects are:
+    .file.read (Command)
+    .file.import_.read (Command)
+    .mesh.surface_mesh.read (Command)
+    .tui.display.display_states.read (Command)
+    .tui.display.display_states.read (Command)
+    """
+    if (wildcard and match_whole_word) or (wildcard and match_case):
+        warnings.warn(
+            "``wildcard=True`` matches the wildcard pattern.",
+            UserWarning,
+        )
+    elif language and wildcard:
+        warnings.warn(
+            "``wildcard=True`` matches the wildcard pattern.",
+            UserWarning,
+        )
+    elif language and match_whole_word:
+        warnings.warn(
+            "``match_whole_word=True`` matches the given word and its capitalized form.",
+            UserWarning,
+        )
+    elif match_whole_word:
+        warnings.warn(
+            "``match_whole_word=True`` matches the given word and its capitalized form.",
+            UserWarning,
+        )
+    elif match_case:
+        warnings.warn(
+            "``match_case=True`` matches the given word case-sensitively.",
+            UserWarning,
+        )
+
+    if wildcard:
+        _search_wildcard(
+            search_string,
+            api_object_names=api_object_names,
+        )
+    elif match_whole_word:
+        if not match_case:
+            _search_whole_word(
+                search_string,
+                match_whole_word=True,
+                api_object_names=list(
+                    api_tree_data["all_api_object_name_synsets"].keys()
+                ),
+            )
+        else:
+            _search_whole_word(
+                search_string,
+                match_case=True,
+                api_object_names=list(
+                    api_tree_data["all_api_object_name_synsets"].keys()
+                ),
+            )
+    elif search_root:
+        version = None
+        root_version, root_path, prefix = _get_version_path_prefix_from_obj(search_root)
+        if search_root and not prefix:
+            return
+        if not version:
+            version = root_version
+        if not version:
+            for fluent_version in FluentVersion:
+                version = get_version_for_file_name(fluent_version.value)
+                if get_api_tree_file_name(version).exists():
+                    break
+        api_tree_file = get_api_tree_file_name(version)
+        with open(api_tree_file, "rb") as f:
+            api_tree = pickle.load(f)
+
+        if isinstance(search_root, (flobject.Group, flobject.NamedObject)):
+            path = root_path + [
+                flobject.to_python_name(x) for x in search_root.path.split("/")
+            ]
+            root_path = []
+            tree = api_tree
+            while path:
+                p = path.pop(0)
+                if p in tree:
+                    tree = tree[p]
+                    root_path.append(p)
+                elif f"{p}:" in tree:
+                    tree = tree[f"{p}:"]
+                    root_path.append(f"{p}:")
+                    if path:
+                        path.pop(0)
+                else:
+                    return
+
+        def inner(tree, path, root_path):
+            if root_path:
+                path = prefix
+            while root_path:
+                p = root_path.pop(0)
+                if p in tree:
+                    tree = tree[p]
+                else:
+                    return
+
+            for k, v in tree.items():
+                if k in ("<name>", "<index>"):
+                    next_path = k
+                else:
+                    if k.endswith(":"):
+                        k = _remove_suffix(k, ":")
+                        next_path = f'{path}.{k}["<name>"]'
+                    else:
+                        next_path = f"{path}.{k}"
+                type_ = "Object" if isinstance(v, Mapping) else v
+                if _match(
+                    k,
+                    search_string,
+                    match_whole_word=False,
+                    match_case=False,
+                ):
+                    print(f"{next_path} ({type_})")
+                if isinstance(v, Mapping):
+                    inner(v, next_path, root_path)
+
+        inner(api_tree, "", root_path)
+    else:
+        try:
+            _search_semantic(search_string, language, api_tree_data=api_tree_data)
+        except LookupError:
+            _download_nltk_data()
+            _search_semantic(search_string, language, api_tree_data=api_tree_data)
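A hedged usage sketch of the flag handling in ``search()`` above (assumes a generated API index is importable; the exact matches printed depend on the installed Fluent version):

    import warnings

    import ansys.fluent.core as pyfluent

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        pyfluent.search("iter*", wildcard=True)  # fnmatch pattern; semantic matching off
        pyfluent.search("font", match_whole_word=True, match_case=False)
    print([str(w.message) for w in caught])
    # Expect the ``wildcard=True`` and ``match_whole_word=True`` warnings
    # from the branches above.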
diff --git a/tests/test_search.py b/tests/test_search.py
index c4cece12e0f..f0f1210d87b 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -4,12 +4,347 @@ from util.solver_workflow import new_solver_session  # noqa: F401

 import ansys.fluent.core as pyfluent
-from ansys.fluent.core.utils.search import _get_version_path_prefix_from_obj
+from ansys.fluent.core.utils.search import (
+    _get_api_tree_data,
+    _get_capitalize_match_for_word_from_names,
+    _get_close_matches_for_word_from_names,
+    _get_exact_match_for_word_from_names,
+    _get_match_case_for_word_from_names,
+    _get_version_path_prefix_from_obj,
+    _get_wildcard_matches_for_word_from_names,
+    _search,
+    _search_semantic,
+    _search_whole_word,
+    _search_wildcard,
+)
+
+api_tree_data = _get_api_tree_data()
+
+try:
+    api_object_names = list(api_tree_data["all_api_object_name_synsets"].keys())
+except TypeError:
+    api_object_names = []
+
+
+@pytest.mark.fluent_version("==24.2")
+def test_nltk_data_download():
+    import nltk
+
+    packages = ["wordnet", "omw-1.4"]
+    for package in packages:
+        nltk.download(package, quiet=True)
+
+    _search_semantic("读", language="cmn", api_tree_data=api_tree_data)
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_get_exact_match_for_word_from_names():
+    exact_match = _get_exact_match_for_word_from_names(
+        "VideoResoutionY",
+        names=api_object_names,
+    )
+    assert "VideoResoutionY" in exact_match
+    assert len(exact_match) == 1
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_get_capitalize_match_for_word_from_names():
+    capitalize_match_cases = _get_capitalize_match_for_word_from_names(
+        "font",
+        names=api_object_names,
+    )
+    assert "font" not in capitalize_match_cases
+    assert set(capitalize_match_cases) == set(
+        [
+            "TextFontAutomaticHorizontalSize",
+            "TextFontName",
+            "TextFontFixedHorizontalSize",
+            "TextFontFixedSize",
+            "TextFontAutomaticSize",
+            "TextFontFixedVerticalSize",
+            "TextFontAutomaticVerticalSize",
+            "ApplicationFontSize",
+            "TextFontFixedUnits",
+            "TextFontAutomaticUnits",
+            "Font",
+        ]
+    )
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_get_match_case_for_word_from_names():
+    match_cases = _get_match_case_for_word_from_names(
+        "font",
+        names=api_object_names,
+    )
+    for match_case in match_cases:
+        assert "Font" not in match_case
+        assert "font" in match_case
+    assert set(match_cases) == set(
+        [
+            "text_font_fixed_units",
+            "text_font_automatic_horizontal_size",
+            "font_name",
+            "font_size",
+            "text_font_fixed_size",
+            "label_font",
+            "text_font_fixed_vertical_size",
+            "text_font_automatic_vertical_size",
+            "text_font_automatic_units",
+            "font",
+            "text_font_automatic_size",
+            "text_font_fixed_horizontal_size",
+            "application_font_size",
+            "font_automatic",
+            "text_font_name",
+        ]
+    )
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_get_wildcard_matches_for_word_from_names():
+    wildcard_matches = _get_wildcard_matches_for_word_from_names(
+        "iter*",
+        names=api_object_names,
+    )
+    assert set(wildcard_matches) == set(
+        [
+            "iter_count",
+            "iterating",
+            "iter_per_coupling_count",
+            "iteration_at_creation_or_edit",
+            "iteration_interval",
+            "iteration_number_of_samples_or_levels",
+            "iterations",
+            "iterate",
+            "iterate_steady_2way_fsi",
+            "iteration",
+            "iteration_sampling_type",
+            "iteration_count",
+            "iteration_parameters",
+        ]
+    )
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_get_close_matches_for_word_from_names():
+    close_matches = _get_close_matches_for_word_from_names(
+        "font",
+        names=api_object_names,
+    )
+    assert "font" in close_matches
+
+    close_matches = _get_close_matches_for_word_from_names(
+        "fnt",
+        names=api_object_names,
+    )
+    assert "font" in close_matches
+
+    close_matches = _get_close_matches_for_word_from_names(
+        "solve_flow",
+        names=api_object_names,
+    )
+    assert "solve_flow_last" in close_matches
+
+    close_matches = _get_close_matches_for_word_from_names(
+        "sunshine",
+        names=api_object_names,
+    )
+    assert "sunshine_factor" in close_matches
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_search_wildcard(capsys):
+    _search_wildcard(
+        "max*",
+        api_object_names=api_object_names,
+    )
+    lines = capsys.readouterr().out.splitlines()
+    assert (
+        ".solution.run_calculation.cfl_based_adaptive_time_stepping.max_fixed_time_step (Parameter)"
+        in lines
+    )
+
+    _search_wildcard(
+        "min*",
+        api_object_names=api_object_names,
+    )
+    lines = capsys.readouterr().out.splitlines()
+    assert ".solution.controls.limits.min_des_tke (Parameter)" in lines
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_search_whole_word(capsys):
+    _search_whole_word(
+        "RemovePartitionLinesTolerance",
+        match_case=False,
+        api_object_names=api_object_names,
+    )
+    lines = capsys.readouterr().out.splitlines()
+    assert (
+        ".preferences.Graphics.RemovePartitionLinesTolerance (Parameter)"
+        in lines
+    )
+
+    _search_whole_word(
+        "k0_sei",
+        match_case=False,
+        api_object_names=api_object_names,
+    )
+    lines = capsys.readouterr().out.splitlines()
+    assert (
+        ".setup.models.battery.tool_kits.standalone_echem_model.k0_sei (Parameter)"
+        in lines
+    )
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_search_semantic(capsys):
+    _search_semantic("读", language="cmn", api_tree_data=api_tree_data)
+    lines = capsys.readouterr().out.splitlines()
+    assert ".file.read_surface_mesh (Command)" in lines
+
+    _search_semantic("フォント", language="jpn", api_tree_data=api_tree_data)
+    lines = capsys.readouterr().out.splitlines()
+    assert ".tui.preferences.appearance.charts.font (Object)" in lines
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_whole_word_search(capsys):
+    pyfluent.search("Font", match_whole_word=True)
+    lines = capsys.readouterr().out.splitlines()
+    assert "font" not in lines
+    assert ".preferences.Appearance.Charts.Font (Object)" in lines
+    assert (
+        ".preferences.Graphics.ColormapSettings.TextFontAutomaticUnits (Parameter)"
+        in lines
+    )
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_match_case_search(capsys):
+    pyfluent.search("font", match_case=True)
+    lines = capsys.readouterr().out.splitlines()
+    for line in lines:
+        assert "Font" not in line
+        assert "font" in line
+    assert (
+        '.results.graphics.pathline["<name>"].color_map.font_name (Parameter)'
+        in lines
+    )
+    assert (
+        '.results.graphics.vector["<name>"].color_map.font_automatic (Parameter)'
+        in lines
+    )
+
+
+@pytest.mark.fluent_version("==24.2")
+@pytest.mark.codegen_required
+def test_match_whole_word_and_case_search(capsys):
+    pyfluent.search("font", match_whole_word=True, match_case=True)
+    lines = capsys.readouterr().out.splitlines()
+    for line in lines:
+        assert "font" in line
+        assert "Font" not in line
+    assert ".preferences.Appearance.Charts.Font (Object)" not in lines
+    assert (
+        ".preferences.Graphics.ColormapSettings.TextFontAutomaticUnits (Parameter)"
+        not in lines
+    )
+    assert (
+        '.results.graphics.lic["<name>"].color_map.font_name (Parameter)'
+        in lines
+    )
pyfluent.search("cfb_lma") + lines = capsys.readouterr().out.splitlines() + assert ( + ".setup.models.viscous.geko_options.cbf_lam (Parameter)" + in lines + ) + + +@pytest.mark.fluent_version("==24.2") +@pytest.mark.codegen_required +def test_wildcard_search(capsys): + pyfluent.search("iter*", wildcard=True) + lines = capsys.readouterr().out.splitlines() + assert ".solution.run_calculation.iter_count (Parameter)" in lines + assert ".solution.run_calculation.iterating (Query)" in lines + + +@pytest.mark.fluent_version("==24.2") +@pytest.mark.codegen_required +def test_chinese_semantic_search(capsys): + pyfluent.search("读", language="cmn") + lines = capsys.readouterr().out.splitlines() + assert ".file.read_case (Command)" in lines + + pyfluent.search("写", language="cmn") + lines = capsys.readouterr().out.splitlines() + assert ".file.write_case (Command)" in lines + + +@pytest.mark.fluent_version("==24.2") +@pytest.mark.codegen_required +def test_japanese_semantic_search(capsys): + pyfluent.search("フォント", language="jpn") + lines = capsys.readouterr().out.splitlines() + assert ".tui.preferences.appearance.charts.font (Object)" in lines + + +@pytest.mark.codegen_required +@pytest.mark.fluent_version("==24.2") +def test_search_from_root_latest(capsys, new_watertight_workflow_session): + meshing = new_watertight_workflow_session + pyfluent.search("display", search_root=meshing) + lines = capsys.readouterr().out.splitlines() + assert ".tui.display (Object)" in lines + pyfluent.search("display", search_root=meshing.tui) + lines = capsys.readouterr().out.splitlines() + assert ".display (Object)" in lines + pyfluent.search("display", search_root=meshing.tui.display) + lines = capsys.readouterr().out.splitlines() + assert ".update_scene.display (Command)" in lines + assert ".display_states (Object)" in lines + pyfluent.search("cad", search_root=meshing.meshing) + lines = capsys.readouterr().out.splitlines() + assert ".GlobalSettings.EnableCleanCAD (Parameter)" in lines + assert ".LoadCADGeometry (Command)" in lines + pyfluent.search("next", search_root=meshing.workflow) + lines = capsys.readouterr().out.splitlines() + assert '.TaskObject[""].InsertNextTask (Command)' in lines + pyfluent.search("next", search_root=meshing.workflow.TaskObject) + lines = capsys.readouterr().out.splitlines() + assert '[""].InsertNextTask (Command)' in lines + pyfluent.search("next", search_root=meshing.workflow.TaskObject["Import Geometry"]) + lines = capsys.readouterr().out.splitlines() + assert ".InsertNextTask (Command)" in lines + pyfluent.search("timeout", search_root=meshing.preferences) + lines = capsys.readouterr().out.splitlines() + assert ".General.IdleTimeout (Parameter)" in lines + pyfluent.search("timeout", search_root=meshing.preferences.General) + lines = capsys.readouterr().out.splitlines() + assert ".IdleTimeout (Parameter)" in lines @pytest.mark.codegen_required def test_search(capsys): - pyfluent.search("display") + _search("display") lines = capsys.readouterr().out.splitlines() assert ".tui.display (Object)" in lines assert ".tui.display.update_scene.display (Command)" in lines @@ -23,7 +358,7 @@ def test_search(capsys): in lines ) - pyfluent.search("display", match_whole_word=True) + _search("display", match_whole_word=True) lines = capsys.readouterr().out.splitlines() assert '.results.graphics.mesh[""].display (Command)' in lines assert ( @@ -31,7 +366,7 @@ def test_search(capsys): not in lines ) - pyfluent.search("Display", match_case=True) + _search("Display", match_case=True) lines = 
     assert ".tui.display (Object)" not in lines
     assert (
@@ -39,9 +374,7 @@ def test_search(capsys):
         in lines
     )

-    pyfluent.search(
-        "GraphicsWindowDisplayTimeout", match_whole_word=True, match_case=True
-    )
+    _search("GraphicsWindowDisplayTimeout", match_whole_word=True, match_case=True)
     lines = capsys.readouterr().out.splitlines()
     assert (
         ".preferences.Graphics.MeshingMode.GraphicsWindowDisplayTimeout (Parameter)"
@@ -129,33 +462,33 @@ def test_get_version_path_prefix_from_obj(
 @pytest.mark.fluent_version("latest")
 def test_search_from_root(capsys, new_watertight_workflow_session):
     meshing = new_watertight_workflow_session
-    pyfluent.search("display", search_root=meshing)
+    _search("display", search_root=meshing)
     lines = capsys.readouterr().out.splitlines()
     assert ".tui.display (Object)" in lines
-    pyfluent.search("display", search_root=meshing.tui)
+    _search("display", search_root=meshing.tui)
     lines = capsys.readouterr().out.splitlines()
     assert ".display (Object)" in lines
-    pyfluent.search("display", search_root=meshing.tui.display)
+    _search("display", search_root=meshing.tui.display)
     lines = capsys.readouterr().out.splitlines()
     assert ".update_scene.display (Command)" in lines
     assert ".display_states (Object)" in lines
-    pyfluent.search("cad", search_root=meshing.meshing)
+    _search("cad", search_root=meshing.meshing)
     lines = capsys.readouterr().out.splitlines()
     assert ".GlobalSettings.EnableCleanCAD (Parameter)" in lines
     assert ".LoadCADGeometry (Command)" in lines
-    pyfluent.search("next", search_root=meshing.workflow)
+    _search("next", search_root=meshing.workflow)
     lines = capsys.readouterr().out.splitlines()
     assert '.TaskObject["<name>"].InsertNextTask (Command)' in lines
-    pyfluent.search("next", search_root=meshing.workflow.TaskObject)
+    _search("next", search_root=meshing.workflow.TaskObject)
     lines = capsys.readouterr().out.splitlines()
     assert '["<name>"].InsertNextTask (Command)' in lines
-    pyfluent.search("next", search_root=meshing.workflow.TaskObject["Import Geometry"])
+    _search("next", search_root=meshing.workflow.TaskObject["Import Geometry"])
     lines = capsys.readouterr().out.splitlines()
     assert ".InsertNextTask (Command)" in lines
-    pyfluent.search("timeout", search_root=meshing.preferences)
+    _search("timeout", search_root=meshing.preferences)
     lines = capsys.readouterr().out.splitlines()
     assert ".General.IdleTimeout (Parameter)" in lines
-    pyfluent.search("timeout", search_root=meshing.preferences.General)
+    _search("timeout", search_root=meshing.preferences.General)
     lines = capsys.readouterr().out.splitlines()
     assert ".IdleTimeout (Parameter)" in lines

@@ -164,31 +497,29 @@ def test_search_from_root(capsys, new_watertight_workflow_session):
 @pytest.mark.fluent_version("==23.2")
 def test_search_settings_from_root(capsys, load_static_mixer_settings_only):
     solver = load_static_mixer_settings_only
-    pyfluent.search("conduction", search_root=solver)
+    _search("conduction", search_root=solver)
     lines = capsys.readouterr().out.splitlines()
     assert ".tui.define.models.shell_conduction (Object)" in lines
     assert (
         '.setup.boundary_conditions.wall["<name>"].phase["<name>"].shell_conduction["<name>"] (Object)'
         in lines
     )
-    pyfluent.search("conduction", search_root=solver.setup.boundary_conditions)
+    _search("conduction", search_root=solver.setup.boundary_conditions)
     lines = capsys.readouterr().out.splitlines()
     assert (
         '.wall["<name>"].phase["<name>"].shell_conduction["<name>"] (Object)'
         in lines
     )
-    pyfluent.search("conduction", search_root=solver.setup.boundary_conditions.wall)
_search("conduction", search_root=solver.setup.boundary_conditions.wall) lines = capsys.readouterr().out.splitlines() assert ( '[""].phase[""].shell_conduction[""] (Object)' in lines ) - pyfluent.search( - "conduction", search_root=solver.setup.boundary_conditions.wall["wall"] - ) + _search("conduction", search_root=solver.setup.boundary_conditions.wall["wall"]) lines = capsys.readouterr().out.splitlines() assert '.phase[""].shell_conduction[""] (Object)' in lines - pyfluent.search( + _search( "conduction", search_root=solver.setup.boundary_conditions.wall["wall"].phase ) lines = capsys.readouterr().out.splitlines()