From dda3e64fffd0da8c3a988e53c19487c87e4d50ef Mon Sep 17 00:00:00 2001 From: xxyzz Date: Wed, 7 Aug 2024 19:38:47 +0800 Subject: [PATCH] Remove "use GPU" option not much benefit but lots of errors --- config.py | 31 +------------------------------ data/deps.json | 5 ----- deps.py | 22 ---------------------- docs/installation.rst | 14 -------------- docs/usage.rst | 2 -- error_dialogs.py | 10 ---------- parse_job.py | 9 ++------- utils.py | 13 +++---------- 8 files changed, 6 insertions(+), 100 deletions(-) diff --git a/config.py b/config.py index 68f5b55..b6d75d1 100644 --- a/config.py +++ b/config.py @@ -4,7 +4,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any -from calibre.constants import isfrozen, ismacos +from calibre.constants import isfrozen from calibre.gui2 import Dispatcher from calibre.gui2.threaded_jobs import ThreadedJob from calibre.utils.config import JSONConfig @@ -55,8 +55,6 @@ prefs.defaults["choose_format_manually"] = True prefs.defaults["wiktionary_gloss_lang"] = "en" prefs.defaults["kindle_gloss_lang"] = "en" -prefs.defaults["use_gpu"] = False -prefs.defaults["cuda"] = "cu121" prefs.defaults["use_wiktionary_for_kindle"] = False prefs.defaults["remove_link_styles"] = False prefs.defaults["python_path"] = "" @@ -124,30 +122,6 @@ def __init__(self): self.python_path.setText(prefs["python_path"]) form_layout.addRow(python_path_label, self.python_path) - if not ismacos: - self.use_gpu_box = QCheckBox(_("Run spaCy with GPU(requires CUDA)")) - self.use_gpu_box.setToolTip( - _( - "GPU will be used when creating X-Ray file if spaCy has transformer" - " model for the book language with ner component." 
- ) - ) - self.use_gpu_box.setChecked(prefs["use_gpu"]) - vl.addWidget(self.use_gpu_box) - - cuda_versions = {"cu121": "CUDA 12.1", "cu118": "CUDA 11.8"} - self.cuda_version_box = QComboBox() - for cuda_version, text in cuda_versions.items(): - self.cuda_version_box.addItem(text, cuda_version) - if prefs["cuda"] not in cuda_versions: - prefs["cuda"] = "cu121" - self.cuda_version_box.setCurrentText(cuda_versions[prefs["cuda"]]) - cuda_version_label = QLabel(_("CUDA version")) - cuda_version_label.setToolTip( - _('Use command "nvcc --version" to check CUDA version') - ) - form_layout.addRow(cuda_version_label, self.cuda_version_box) - model_size_label = QLabel( _('spaCy model size') ) @@ -232,9 +206,6 @@ def save_settings(self) -> None: prefs["add_locator_map"] = self.locator_map_box.isChecked() prefs["minimal_x_ray_count"] = self.minimal_x_ray_count.value() prefs["remove_link_styles"] = self.remove_link_styles.isChecked() - if not ismacos: - prefs["use_gpu"] = self.use_gpu_box.isChecked() - prefs["cuda"] = self.cuda_version_box.currentData() mediawiki_api = self.mediawiki_api.text().strip("/ ") if mediawiki_api.endswith("/api.php") or mediawiki_api == "": prefs["mediawiki_api"] = mediawiki_api diff --git a/data/deps.json b/data/deps.json index 116c6c6..11fd8fe 100644 --- a/data/deps.json +++ b/data/deps.json @@ -1,13 +1,8 @@ { - "cupy": "12.3.0", "lxml": "5.2.2", "rapidfuzz": "3.9.4", "spacy": "3.7.5", "spacy_cpu_model": "3.7.0", - "spacy_trf_model": "3.7.2", "en_spacy_cpu_model": "3.7.1", - "en_spacy_trf_model": "3.7.3", - "thinc-apple-ops": "0.1.5", - "torch": "2.4.0", - "typing-extensions": "4.12.2" + "thinc-apple-ops": "0.1.5" } diff --git a/deps.py b/deps.py index a3c2917..035cc33 100644 --- a/deps.py +++ b/deps.py @@ -51,26 +51,6 @@ def install_deps(pkg: str, notif: Any) -> None: f"{pkg}-{model_version}/{pkg}-{model_version}-py3-none-any.whl" ) pip_install(pkg, model_version, url=url, notif=notif) - if pkg.endswith("_trf"): - from .config import prefs - - pip_install("cupy-wheel", 
dep_versions["cupy"], notif=notif) - # PyTorch's Windows package on pypi.org is CPU build version, - # reintall the CUDA build version - if iswindows or prefs["cuda"] == "cu118": - pip_install( - "torch", - dep_versions["torch"], - extra_index=f"https://download.pytorch.org/whl/{prefs['cuda']}", - notif=notif, - ) - # an old version of typing-extensions(4.4.0) is installed - # from pytorch's index which is incompatible with pydantic 2.4.2 - pip_install( - "typing-extensions", - dep_versions["typing-extensions"], - notif=notif, - ) if ismacos and platform.machine() == "arm64": pip_install( @@ -125,8 +105,6 @@ def pip_install( notif: Any = None, ) -> None: pattern = f"{pkg.replace('-', '_')}-{pkg_version}*" - if pkg == "torch" and extra_index: - pattern = f"torch-{pkg_version}+{extra_index.split('/')[-1]}*" if not any(LIBS_PATH.glob(pattern)): if notif: notif.put((0, f"Installing {pkg}")) diff --git a/docs/installation.rst b/docs/installation.rst index aef7684..7da6322 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -71,20 +71,6 @@ Use Chocolatey or download from https://www.python.org/downloads .. attention:: - Do not change the default installation settings in the Python installer. - - The dependencies(`PyTorch `_) of the transformer model may not support the latest Python and CUDA version. - - -Install CUDA(optional) ----------------------- - -`CUDA `_ is required for the "Run spaCy with GPU" feature, you can download CUDA from https://developer.nvidia.com/cuda-toolkit-archive - -.. attention:: - - The latest CUDA release usually is not supported by PyTorch, read https://pytorch.org/get-started/locally to find the supported CUDA versions. - - - C/C++ compiler is needed for Windows, download from https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2022 - - - Read the installation guide on the CUDA download page for more information. 
Install WordDumb plugin ----------------------- diff --git a/docs/usage.rst b/docs/usage.rst index 34ba5c2..68a652e 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -8,8 +8,6 @@ Set preferences - Enable "Fetch X-Ray people descriptions from Wikipedia or other MediaWiki server" option for nonfiction books and novels that have character pages on Wikipedia or any other MediaWiki server. A quote from the book will be used if it's disabled or the page is not found. -- Enable "Run spaCy with GPU" option if your machine has `CUDA `_. GPU will be used when creating X-Ray file if spaCy has transformer model for the book language with ner component. - - Larger spaCy model has higher `Named-entity recognition `_ precision therefore improves X-Ray quality, more details at https://spacy.io/models/en - Enter a `MediaWiki Action API `_ link to get X-Ray descriptions from a MediaWiki server, delete the link to search Wikipedia. Most MediaWiki Action API endpoint is ``https://wiki.domain/w/api.php`` but some servers don't have the ``/w`` part, you can check the API URL in a browser. 
diff --git a/error_dialogs.py b/error_dialogs.py index cef5e66..6ab3f69 100644 --- a/error_dialogs.py +++ b/error_dialogs.py @@ -89,16 +89,6 @@ def subprocess_error(job: Any, parent: Any) -> None: ) elif "ModuleNotFoundError" in exception: module_not_found_error(job.details + exception, parent) - elif "Unable to detect NVIDIA CUDA" in exception: - error_dialog( - _("Can't find CUDA"), - _( - "'Run spaCy with GPU' feature requires " - "CUDA" - ), - job.details + exception, - parent, - ) else: check_network_error(job.details + exception, parent) diff --git a/parse_job.py b/parse_job.py index 3fd67a0..a15b2bb 100644 --- a/parse_job.py +++ b/parse_job.py @@ -728,17 +728,12 @@ def load_spacy(model: str, book_path: str | None, lemma_lang: str) -> Any: if model == "": return spacy.blank(lemma_lang) - excluded_components = [] + excluded_components = ["parser"] if book_path is None: excluded_components.append("ner") - if model.endswith("_trf"): - spacy.require_gpu() - else: - excluded_components.append("parser") - nlp = spacy.load(model, exclude=excluded_components) - if not model.endswith("_trf") and book_path is not None: + if book_path is not None: # simpler and faster https://spacy.io/usage/linguistic-features#sbd nlp.enable_pipe("senter") diff --git a/utils.py b/utils.py index d066a0f..aaca0fc 100644 --- a/utils.py +++ b/utils.py @@ -28,8 +28,6 @@ class Prefs(TypedDict): choose_format_manually: bool wiktionary_gloss_lang: str kindle_gloss_lang: str - use_gpu: bool - cuda: str last_opened_kindle_lemmas_language: str last_opened_wiktionary_lemmas_language: str use_wiktionary_for_kindle: bool @@ -165,11 +163,7 @@ def spacy_model_name(lemma_lang: str, prefs: Prefs) -> str: spacy_model = languages[lemma_lang]["spacy"] if spacy_model == "": return "" - if prefs["use_gpu"] and languages[lemma_lang]["has_trf"]: - spacy_model += "trf" - else: - spacy_model += prefs["model_size"] - return spacy_model + return spacy_model + prefs["model_size"] def load_languages_data( @@ -189,9 
+183,8 @@ def load_languages_data( def get_spacy_model_version( model_name: str, dependency_versions: dict[str, str] ) -> str: - key = "spacy_trf_model" if model_name.endswith("_trf") else "spacy_cpu_model" lang_code = model_name[:2] - lang_key = f"{lang_code}_{key}" + lang_key = f"{lang_code}_spacy_cpu_model" if lang_key in dependency_versions: return dependency_versions[lang_key] - return dependency_versions.get(key, "") + return dependency_versions.get("spacy_cpu_model", "")