From 36883e458ed37f97615188da0f203b49c65e4dae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mike=20F=C3=A4hrmann?=
Date: Tue, 15 Oct 2024 08:24:06 +0200
Subject: [PATCH] use 'v[0] == "c"' instead of 'v.startswith("c")'

---
 gallery_dl/__init__.py               | 2 +-
 gallery_dl/cookies.py                | 9 ++++++---
 gallery_dl/extractor/deviantart.py   | 2 +-
 gallery_dl/extractor/exhentai.py     | 6 +++---
 gallery_dl/extractor/foolfuuka.py    | 2 +-
 gallery_dl/extractor/mangakakalot.py | 6 +++---
 gallery_dl/extractor/postmill.py     | 2 +-
 gallery_dl/extractor/telegraph.py    | 2 +-
 gallery_dl/extractor/tsumino.py      | 6 +++---
 gallery_dl/util.py                   | 7 ++++---
 10 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/gallery_dl/__init__.py b/gallery_dl/__init__.py
index 7a9e0bee..62e96ae7 100644
--- a/gallery_dl/__init__.py
+++ b/gallery_dl/__init__.py
@@ -63,7 +63,7 @@ def main():
             browser, _, profile = args.cookies_from_browser.partition(":")
             browser, _, keyring = browser.partition("+")
             browser, _, domain = browser.partition("/")
-            if profile.startswith(":"):
+            if profile and profile[0] == ":":
                 container = profile[1:]
                 profile = None
             else:
diff --git a/gallery_dl/cookies.py b/gallery_dl/cookies.py
index 6e97f443..48316628 100644
--- a/gallery_dl/cookies.py
+++ b/gallery_dl/cookies.py
@@ -74,7 +74,8 @@ def load_cookies_firefox(profile=None, container=None, domain=None):
         cookies = [
             Cookie(
                 0, name, value, None, False,
-                domain, True if domain else False, domain.startswith("."),
+                domain, True if domain else False,
+                domain[0] == "." if domain else False,
                 path, True if path else False, secure, expires,
                 False, None, None, {},
             )
@@ -158,7 +159,8 @@
             cookies.append(Cookie(
                 0, name, value, None, False,
-                domain, True if domain else False, domain.startswith("."),
+                domain, True if domain else False,
+                domain[0] == "." if domain else False,
                 path, True if path else False, secure, expires or None,
                 False, None, None, {},
             ))
@@ -323,7 +325,8 @@ def _safari_parse_cookies_record(data, cookies, host=None):
     cookies.append(Cookie(
         0, name, value, None, False,
-        domain, True if domain else False, domain.startswith("."),
+        domain, True if domain else False,
+        domain[0] == "." if domain else False,
         path, True if path else False, is_secure, expiration_date,
         False, None, None, {},
     ))
diff --git a/gallery_dl/extractor/deviantart.py b/gallery_dl/extractor/deviantart.py
index 3b10a8b1..693def99 100644
--- a/gallery_dl/extractor/deviantart.py
+++ b/gallery_dl/extractor/deviantart.py
@@ -401,7 +401,7 @@ class DeviantartExtractor(Extractor):
         html = content["html"]
         markup = html["markup"]
-        if not markup.startswith("{"):
+        if not markup or markup[0] != "{":
             return markup
 
         if html["type"] == "tiptap":
diff --git a/gallery_dl/extractor/exhentai.py b/gallery_dl/extractor/exhentai.py
index 01af7a4c..3e6d5378 100644
--- a/gallery_dl/extractor/exhentai.py
+++ b/gallery_dl/extractor/exhentai.py
@@ -260,9 +260,9 @@ class ExhentaiGalleryExtractor(ExhentaiExtractor):
             "torrentcount" : extr('>Torrent Download (', ')'),
         }
 
-        if data["uploader"].startswith("<"):
-            data["uploader"] = text.unescape(text.extr(
-                data["uploader"], ">", "<"))
+        uploader = data["uploader"]
+        if uploader and uploader[0] == "<":
+            data["uploader"] = text.unescape(text.extr(uploader, ">", "<"))
 
         f = data["favorites"][0]
         if f == "N":
diff --git a/gallery_dl/extractor/foolfuuka.py b/gallery_dl/extractor/foolfuuka.py
index 85dd8969..44c4542b 100644
--- a/gallery_dl/extractor/foolfuuka.py
+++ b/gallery_dl/extractor/foolfuuka.py
@@ -37,7 +37,7 @@ class FoolfuukaExtractor(BaseExtractor):
         if not url and "remote_media_link" in media:
             url = self.remote(media)
-        if url.startswith("/"):
+        if url and url[0] == "/":
             url = self.root + url
 
         post["filename"], _, post["extension"] = \
diff --git a/gallery_dl/extractor/mangakakalot.py b/gallery_dl/extractor/mangakakalot.py
index 0183b25f..9fc8681d 100644
--- a/gallery_dl/extractor/mangakakalot.py
+++ b/gallery_dl/extractor/mangakakalot.py
@@ -19,7 +19,7 @@ BASE_PATTERN = r"(?:https?://)?(?:ww[\dw]?\.)?mangakakalot\.tv"
 class MangakakalotBase():
     """Base class for mangakakalot extractors"""
     category = "mangakakalot"
-    root = "https://ww6.mangakakalot.tv"
+    root = "https://ww8.mangakakalot.tv"
 
 
 class MangakakalotChapterExtractor(MangakakalotBase, ChapterExtractor):
@@ -40,7 +40,7 @@ class MangakakalotChapterExtractor(MangakakalotBase, ChapterExtractor):
         match = re.match(
             r"(?:[Vv]ol\. *(\d+) )?"
             r"[Cc]hapter *([^:]*)"
-            r"(?:: *(.+))?", info)
+            r"(?:: *(.+))?", info or "")
         volume, chapter, title = match.groups() if match else ("", "", info)
         chapter, sep, minor = chapter.partition(".")
@@ -86,7 +86,7 @@ class MangakakalotMangaExtractor(MangakakalotBase, MangaExtractor):
             data["chapter"] = text.parse_int(chapter)
             data["chapter_minor"] = sep + minor
 
-            if url.startswith("/"):
+            if url[0] == "/":
                 url = self.root + url
             results.append((url, data.copy()))
         return results
diff --git a/gallery_dl/extractor/postmill.py b/gallery_dl/extractor/postmill.py
index 29b351ba..88771758 100644
--- a/gallery_dl/extractor/postmill.py
+++ b/gallery_dl/extractor/postmill.py
@@ -50,7 +50,7 @@ class PostmillExtractor(BaseExtractor):
         forum = match.group(1)
         id = int(match.group(2))
 
-        is_text_post = url.startswith("/")
+        is_text_post = (url[0] == "/")
         is_image_post = self._search_image_tag(page) is not None
         data = {
             "title": title,
diff --git a/gallery_dl/extractor/telegraph.py b/gallery_dl/extractor/telegraph.py
index dd5988f8..468840b2 100644
--- a/gallery_dl/extractor/telegraph.py
+++ b/gallery_dl/extractor/telegraph.py
@@ -49,7 +49,7 @@ class TelegraphGalleryExtractor(GalleryExtractor):
             url, pos = text.extract(figure, 'src="', '"')
             if url.startswith("/embed/"):
                 continue
-            elif url.startswith("/"):
+            elif url[0] == "/":
                 url = self.root + url
             caption, pos = text.extract(figure, "<figcaption>", "<", pos)
             num += 1
diff --git a/gallery_dl/extractor/tsumino.py b/gallery_dl/extractor/tsumino.py
index bce661a5..b196aeb4 100644
--- a/gallery_dl/extractor/tsumino.py
+++ b/gallery_dl/extractor/tsumino.py
@@ -148,8 +148,10 @@ class TsuminoSearchExtractor(TsuminoBase, Extractor):
             data["PageNumber"] += 1
 
     def _parse(self, query):
+        if not query:
+            return {}
         try:
-            if query.startswith("?"):
+            if query[0] == "?":
                 return self._parse_simple(query)
             return self._parse_jsurl(query)
         except Exception as exc:
@@ -187,8 +189,6 @@ class TsuminoSearchExtractor(TsuminoBase, Extractor):
         Example: ~(name~'John*20Doe~age~42~children~(~'Mary~'Bill))
         Ref: https://github.com/Sage/jsurl
         """
-        if not data:
-            return {}
 
         i = 0
         imax = len(data)
diff --git a/gallery_dl/util.py b/gallery_dl/util.py
index a269a2b6..80c53cb8 100644
--- a/gallery_dl/util.py
+++ b/gallery_dl/util.py
@@ -432,7 +432,7 @@ def cookiestxt_load(fp):
             None, False,
             domain,
             domain_specified == "TRUE",
-            domain.startswith("."),
+            domain[0] == "." if domain else False,
             path, False,
             secure == "TRUE",
             None if expires == "0" or not expires else expires,
@@ -458,9 +458,10 @@ def cookiestxt_store(fp, cookies):
         name = cookie.name
         value = cookie.value
+        domain = cookie.domain
 
         write("\t".join((
-            cookie.domain,
-            "TRUE" if cookie.domain.startswith(".") else "FALSE",
+            domain,
+            "TRUE" if domain and domain[0] == "." else "FALSE",
            cookie.path,
             "TRUE" if cookie.secure else "FALSE",
             "0" if cookie.expires is None else str(cookie.expires),