From a427bb55577b8b947773aa48cbc9d1202c4ba76e Mon Sep 17 00:00:00 2001
From: ACA
Date: Sun, 4 Feb 2024 19:07:02 +0100
Subject: [PATCH] fix tw.m.ixdzs.com & www.aixdzs.com sources (now redirect to new domain)

---
 sources/zh/aixdzs.py | 62 ---------------------------------------
 sources/zh/ixdzs.py  | 69 ++++++++++++++++++++++++++++++++------------
 2 files changed, 50 insertions(+), 81 deletions(-)
 delete mode 100644 sources/zh/aixdzs.py

diff --git a/sources/zh/aixdzs.py b/sources/zh/aixdzs.py
deleted file mode 100644
index 29dc1350c..000000000
--- a/sources/zh/aixdzs.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-import logging
-
-
-from lncrawl.core.crawler import Crawler
-
-logger = logging.getLogger(__name__)
-
-chapter_list_url = "https://read.aixdzs.com/%s"
-
-
-class AixdzsCrawler(Crawler):
-    base_url = "https://www.aixdzs.com"
-
-    def read_novel_info(self):
-        if not self.novel_url.endswith("/"):
-            self.novel_url += "/"
-        logger.debug("Visiting %s", self.novel_url)
-        soup = self.get_soup(self.novel_url)
-
-        possible_title = soup.select_one(".fdl .d_info h1")
-        assert possible_title, "No novel title"
-        self.novel_title = possible_title.text.strip()
-        logger.info("Novel title: %s", self.novel_title)
-
-        possible_novel_cover = soup.select_one('meta[property="og:image"]')
-        if possible_novel_cover:
-            self.novel_cover = self.absolute_url(possible_novel_cover["content"])
-        logger.info("Novel cover: %s", self.novel_cover)
-
-        possible_novel_author = soup.select_one('meta[property="og:novel:author"]')
-        if possible_novel_author:
-            self.novel_author = possible_novel_author["content"]
-        logger.info("%s", self.novel_author)
-
-        # parsed_url = urlparse(self.novel_url)
-        # parsed_path = parsed_url.path.strip('/').split('/')
-        # chapter_url = chapter_list_url % ('/'.join(parsed_path[1:]))
-        # logger.debug('Visiting %s', chapter_url)
-        # soup = self.get_soup(chapter_url)
-
-        volumes = set([])
-        for a in soup.select("div.catalog li a"):
-            ch_id = len(self.chapters) + 1
-            vol_id = 1 + len(self.chapters) // 100
-            volumes.add(vol_id)
-            self.chapters.append(
-                {
-                    "id": ch_id,
-                    "volume": vol_id,
-                    "title": a.text,
-                    "url": self.absolute_url(a["href"]),
-                }
-            )
-
-        self.volumes = [{"id": x, "title": ""} for x in volumes]
-
-    def download_chapter_body(self, chapter):
-        soup = self.get_soup(chapter["url"])
-        contents = soup.select(".content > p")
-        contents = [str(p) for p in contents if p.text.strip()]
-        return "".join(contents)
diff --git a/sources/zh/ixdzs.py b/sources/zh/ixdzs.py
index 774c0d485..1ad0327f9 100644
--- a/sources/zh/ixdzs.py
+++ b/sources/zh/ixdzs.py
@@ -1,14 +1,30 @@
 # -*- coding: utf-8 -*-
 import logging
 from lncrawl.core.crawler import Crawler
+from lncrawl.models import Volume, Chapter
 
 logger = logging.getLogger(__name__)
 
-search_url = "https://tw.m.ixdzs.com/search?k=%s"
+search_url = "https://ixdzs8.tw/bsearch?q=%s"
 
 
 class IxdzsCrawler(Crawler):
-
-    base_url = ["https://tw.m.ixdzs.com/"]
+    base_url = ["https://ixdzs8.tw/", "https://ixdzs8.com/",  # new
+                "https://tw.m.ixdzs.com/", "https://www.aixdzs.com"]  # legacy / redirect domains
+
+    def initialize(self) -> None:
+        self.cleaner.bad_css.add("p.abg")  # advertisement
+
+    @staticmethod
+    def rectify_url(url: str) -> str:
+        """
+        Manually fix a novel URL: map legacy domains to the new ones and drop the
+        trailing slash so the URL can be used later in string templating.
+        """
+        url = url[:-1] if url.endswith("/") else url
+        if "https://tw.m.ixdzs.com" in url:
+            return url.replace("https://tw.m.ixdzs.com", "https://ixdzs8.tw")
+        if "https://www.aixdzs.com" in url:
+            return url.replace("https://www.aixdzs.com", "https://ixdzs8.com")
+        return url
 
     def search_novel(self, query):
         query = query.lower().replace(" ", "+")
@@ -16,7 +32,7 @@ def search_novel(self, query):
 
         results = []
         for data in soup.select(
-            "ul.ix-list.ix-border-t li div.ix-list-info.ix-border-t.burl"
+            "main > div.panel > ul.u-list > li.burl"
         ):
             title = data.select_one("h3 a").get_text().strip()
             url = self.absolute_url(data.select_one("h3 a")["href"])
@@ -30,45 +46,60 @@ def read_novel_info(self):
         """Get novel title, author, cover etc"""
+        self.novel_url = self.rectify_url(self.novel_url)
         logger.debug("Visiting %s", self.novel_url)
         soup = self.get_soup(self.novel_url)
+        content = soup.select_one("div.novel")
+        metadata = content.select_one("div.n-text")
 
-        possible_title = soup.select_one("header.ix-header.ix-border.ix-page h1")
+        possible_title = metadata.select_one("h1")
         assert possible_title, "No novel title"
         self.novel_title = possible_title.get_text()
         logger.info(f"Novel title: {self.novel_title}")
 
-        self.novel_author = soup.select_one(
-            "div.ix-list-info.ui-border-t p"
-        ).get_text()[3:]
+        self.novel_author = metadata.select_one(
+            "a.bauthor"
+        ).get_text()
         logger.info(f"Novel Author: {self.novel_author}")
 
-        possible_novel_cover = soup.select_one("div.ix-list-img-square img")
+        possible_novel_cover = content.select_one("div.n-img > img")
        if possible_novel_cover:
             self.novel_cover = self.absolute_url(possible_novel_cover["src"])
         logger.info(f"Novel Cover: {self.novel_cover}")
 
+        possible_synopsis = soup.select_one("p#intro")
+        if possible_synopsis:
+            self.novel_synopsis = possible_synopsis.get_text()
+
         logger.info("Getting chapters...")
-        for chapter in soup.select("ul.chapter li a"):
-            title = chapter.get_text()
-            url = self.absolute_url(chapter["href"])
-            chap_id = len(self.chapters) + 1
+        last_chap_a = soup.select_one("ul.u-chapter > li:nth-child(1) > a")
+        last_chap_url = self.absolute_url(last_chap_a["href"])
+        last_chap_id = int(last_chap_url.split("/")[-1][1:].replace(".html", "").strip())
+        logger.info(f"URL: {last_chap_url}, {last_chap_id}")
+
+        for chap_id in range(1, last_chap_id + 1):
             if len(self.chapters) % 100 == 0:
                 vol_id = chap_id // 100 + 1
-                vol_title = "Volume " + str(vol_id)
-                self.volumes.append({"id": vol_id, "title": vol_title})
-
-            self.chapters.append(
-                {"id": chap_id, "title": title, "url": url, "volume": vol_id}
-            )
+                vol_title = f"Volume {vol_id}"
+                self.volumes.append(Volume(vol_id, vol_title))
+            self.chapters.append(Chapter(
+                id=chap_id,
+                title=f"Chapter {chap_id}",
+                url=f"{self.novel_url}/p{chap_id}.html",
+            ))
 
     def download_chapter_body(self, chapter):
         logger.info(f"Downloading {chapter['url']}")
         soup = self.get_soup(chapter["url"])
+        possible_chapter_title = soup.select_one("article.page-content > h3")
+        if possible_chapter_title:
+            chapter.title = possible_chapter_title.get_text().strip()
+
         content = soup.select("article.page-content section p")
+        content = self.cleaner.clean_contents(content)
         content = "\n".join(str(p) for p in content)
         return content