#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Generate a Markdown document listing all supported sites"""
import os
import sys
import collections

import util
from gallery_dl import extractor

try:
    from test import results
except ImportError:
    results = None
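
# Sites whose display name is not simply the capitalized category name;
# anything missing here falls back to category.capitalize()
# (see category_text() below).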
CATEGORY_MAP = {
    "2chan"          : "Futaba Channel",
    "35photo"        : "35PHOTO",
    "adultempire"    : "Adult Empire",
    "agnph"          : "AGNPH",
    "allgirlbooru"   : "All girl",
    "ao3"            : "Archive of Our Own",
    "archivedmoe"    : "Archived.Moe",
    "archiveofsins"  : "Archive of Sins",
    "artstation"     : "ArtStation",
    "aryion"         : "Eka's Portal",
    "atfbooru"       : "ATFBooru",
    "azurlanewiki"   : "Azur Lane Wiki",
    "b4k"            : "arch.b4k.co",
    "baraag"         : "baraag",
    "batoto"         : "BATO.TO",
    "bbc"            : "BBC",
    "cien"           : "Ci-en",
    "cohost"         : "cohost!",
    "comicvine"      : "Comic Vine",
    "coomerparty"    : "Coomer",
    "deltaporno"     : "DeltaPorno",
    "deviantart"     : "DeviantArt",
    "drawfriends"    : "Draw Friends",
    "dynastyscans"   : "Dynasty Reader",
    "e621"           : "e621",
    "e926"           : "e926",
    "e6ai"           : "e6AI",
    "erome"          : "EroMe",
    "e-hentai"       : "E-Hentai",
    "exhentai"       : "ExHentai",
    "fallenangels"   : "Fallen Angels Scans",
    "fanbox"         : "pixivFANBOX",
    "fashionnova"    : "Fashion Nova",
    "furaffinity"    : "Fur Affinity",
    "hatenablog"     : "HatenaBlog",
    "hbrowse"        : "HBrowse",
    "hentai2read"    : "Hentai2Read",
    "hentaicosplays" : "Hentai Cosplay",
    "hentaifoundry"  : "Hentai Foundry",
    "hentaifox"      : "HentaiFox",
    "hentaihand"     : "HentaiHand",
    "hentaihere"     : "HentaiHere",
    "hentaiimg"      : "Hentai Image",
    "hentainexus"    : "HentaiNexus",
    "hitomi"         : "Hitomi.la",
    "horne"          : "horne",
    "idolcomplex"    : "Idol Complex",
    "illusioncardsbooru": "Illusion Game Cards",
    "imagebam"       : "ImageBam",
    "imagefap"       : "ImageFap",
    "imgbb"          : "ImgBB",
    "imgbox"         : "imgbox",
    "imagechest"     : "ImageChest",
    "imgkiwi"        : "IMG.Kiwi",
    "imgth"          : "imgth",
    "imgur"          : "imgur",
    "joyreactor"     : "JoyReactor",
    "itchio"         : "itch.io",
    "jpgfish"        : "JPG Fish",
    "kabeuchi"       : "かべうち",
    "kemonoparty"    : "Kemono",
    "livedoor"       : "livedoor Blog",
    "ohpolly"        : "Oh Polly",
    "omgmiamiswimwear": "Omg Miami Swimwear",
    "mangadex"       : "MangaDex",
    "mangafox"       : "Manga Fox",
    "mangahere"      : "Manga Here",
    "mangakakalot"   : "MangaKakalot",
    "mangalife"      : "MangaLife",
    "manganelo"      : "Manganato",
    "mangapark"      : "MangaPark",
    "mangaread"      : "MangaRead",
    "mangasee"       : "MangaSee",
    "mariowiki"      : "Super Mario Wiki",
    "mastodon.social": "mastodon.social",
    "mediawiki"      : "MediaWiki",
    "micmicidol"     : "MIC MIC IDOL",
    "myhentaigallery": "My Hentai Gallery",
    "myportfolio"    : "Adobe Portfolio",
    "naverwebtoon"   : "NaverWebtoon",
    "nhentai"        : "nhentai",
    "nijie"          : "nijie",
    "nozomi"         : "Nozomi.la",
    "nsfwalbum"      : "NSFWalbum.com",
    "paheal"         : "rule #34",
    "photovogue"     : "PhotoVogue",
    "pidgiwiki"      : "PidgiWiki",
    "pixeldrain"     : "pixeldrain",
    "pornimagesxxx"  : "Porn Image",
    "pornpics"       : "PornPics.com",
    "pornreactor"    : "PornReactor",
    "readcomiconline": "Read Comic Online",
    "rbt"            : "RebeccaBlackTech",
    "redgifs"        : "RedGIFs",
    "rozenarcana"    : "Rozen Arcana",
    "rule34"         : "Rule 34",
    "rule34hentai"   : "Rule34Hentai",
    "rule34us"       : "Rule 34",
    "sankaku"        : "Sankaku Channel",
    "sankakucomplex" : "Sankaku Complex",
    "seiga"          : "Niconico Seiga",
    "senmanga"       : "Sen Manga",
    "sensescans"     : "Sense-Scans",
    "sexcom"         : "Sex.com",
    "simplyhentai"   : "Simply Hentai",
    "slickpic"       : "SlickPic",
    "slideshare"     : "SlideShare",
    "smugmug"        : "SmugMug",
    "speakerdeck"    : "Speaker Deck",
    "steamgriddb"    : "SteamGridDB",
    "subscribestar"  : "SubscribeStar",
    "tbib"           : "The Big ImageBoard",
    "tcbscans"       : "TCB Scans",
    "tco"            : "Twitter t.co",
    "tmohentai"      : "TMOHentai",
    "thatpervert"    : "ThatPervert",
    "thebarchive"    : "The /b/ Archive",
    "thecollection"  : "The /co/llection",
    "tumblrgallery"  : "TumblrGallery",
    "vanillarock"    : "もえぴりあ",
    "vidyart2"       : "/v/idyart2",
    "vidyapics"      : "Vidya Booru",
    "vk"             : "VK",
    "vsco"           : "VSCO",
    "wallpapercave"  : "Wallpaper Cave",
    "webmshare"      : "webmshare",
    "webtoons"       : "Webtoon",
    "wikiart"        : "WikiArt.org",
    "wikigg"         : "wiki.gg",
    "wikimediacommons": "Wikimedia Commons",
    "xbunkr"         : "xBunkr",
    "xhamster"       : "xHamster",
    "xvideos"        : "XVideos",
    "yandere"        : "yande.re",
}
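
# Display names for subcategories. Plain string entries apply to every site;
# nested dicts override them for a single (base)category. An empty string
# hides that subcategory from the "Capabilities" column, and list values are
# joined with ", " (see subcategory_text() and COLUMNS below).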
SUBCATEGORY_MAP = {
    ""        : "",
    "art"     : "Art",
    "audio"   : "Audio",
    "doujin"  : "Doujin",
    "home"    : "Home Feed",
    "image"   : "individual Images",
    "index"   : "Site Index",
    "info"    : "User Profile Information",
    "issue"   : "Comic Issues",
    "manga"   : "Manga",
    "media"   : "Media Files",
    "note"    : "Images from Notes",
    "popular" : "Popular Images",
    "recent"  : "Recent Images",
    "search"  : "Search Results",
    "status"  : "Images from Statuses",
    "tag"     : "Tag Searches",
    "tweets"  : "",
    "user"    : "User Profiles",
    "watch"   : "Watches",

    "following"    : "Followed Users",
    "related-pin"  : "related Pins",
    "related-board": "",

    "ao3": {
        "user-works"   : "",
        "user-series"  : "",
        "user-bookmark": "Bookmarks",
    },
    "artstation": {
        "artwork": "Artwork Listings",
        "collections": "",
    },
    "bluesky": {
        "posts": "",
    },
    "coomerparty": {
        "discord"       : "",
        "discord-server": "",
        "posts"         : "",
    },
    "desktopography": {
        "site": "",
    },
    "deviantart": {
        "gallery-search": "Gallery Searches",
        "stash" : "Sta.sh",
        "status": "Status Updates",
        "watch-posts": "",
    },
    "fanbox": {
        "supporting": "Supported User Feed",
        "redirect"  : "Pixiv Redirects",
    },
    "fapello": {
        "path": ["Videos", "Trending Posts", "Popular Videos", "Top Models"],
    },
    "furaffinity": {
        "submissions": "New Submissions",
    },
    "hatenablog": {
        "archive": "Archive",
        "entry"  : "Individual Posts",
    },
    "hentaifoundry": {
        "story": "",
    },
    "imgur": {
        "favorite-folder": "Favorites Folders",
    },
    "inkbunny": {
        "unread": "Unread Submissions",
    },
    "instagram": {
        "posts": "",
        "saved": "Saved Posts",
        "tagged": "Tagged Posts",
    },
    "kemonoparty": {
        "discord"       : "Discord Servers",
        "discord-server": "",
        "posts"         : "",
    },
    "lensdump": {
        "albums": "",
    },
    "mangadex": {
        "feed" : "Followed Feed",
    },
    "nijie": {
        "followed": "Followed Users",
        "nuita"   : "Nuita History",
    },
    "pinterest": {
        "board": "",
        "pinit": "pin.it Links",
        "created": "Created Pins",
        "allpins": "All Pins",
    },
    "pixiv": {
        "me" : "pixiv.me Links",
        "novel-bookmark": "Novel Bookmarks",
        "novel-series": "Novel Series",
        "novel-user": "",
        "pixivision": "pixivision",
        "sketch": "Sketch",
        "work": "individual Images",
    },
    "poringa": {
        "post": "Posts Images",
    },
    "pornhub": {
        "gifs": "",
    },
    "raddle": {
        "usersubmissions": "User Profiles",
        "post"           : "Individual Posts",
        "shorturl"       : "",
    },
    "redgifs": {
        "collections": "",
    },
    "sankaku": {
        "books": "Book Searches",
    },
    "sexcom": {
        "pins": "User Pins",
    },
    "skeb": {
        "following"      : "Followed Creators",
        "following-users": "Followed Users",
    },
    "smugmug": {
        "path": "Images from Users and Folders",
    },
    "steamgriddb": {
        "asset": "Individual Assets",
    },
    "tumblr": {
        "day": "Days",
    },
    "twitter": {
        "media": "Media Timelines",
        "tweets": "",
        "replies": "",
        "community": "",
        "list-members": "List Members",
    },
    "vk": {
        "tagged": "Tagged Photos",
    },
    "vsco": {
        "spaces": "",
    },
    "wallhaven": {
        "collections": "",
        "uploads"    : "",
    },
    "wallpapercave": {
        "image": ["individual Images", "Search Results"],
    },
    "weasyl": {
        "journals"   : "",
        "submissions": "",
    },
    "weibo": {
        "home": "",
        "newvideo": "",
    },
    "wikiart": {
        "artists": "Artist Listings",
    },
    "wikimedia": {
        "article": ["Articles", "Categories", "Files"],
    },
}
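
# Section headings for extractor basecategories; anything not listed here
# falls back to basecategory.capitalize() + " Instances"
# (see generate_output() below).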
BASE_MAP = {
    "E621"        : "e621 Instances",
    "foolfuuka"   : "FoolFuuka 4chan Archives",
    "foolslide"   : "FoOlSlide Instances",
    "gelbooru_v01": "Gelbooru Beta 0.1.11",
    "gelbooru_v02": "Gelbooru Beta 0.2",
    "jschan"      : "jschan Imageboards",
    "lolisafe"    : "lolisafe and chibisafe",
    "lynxchan"    : "LynxChan Imageboards",
    "moebooru"    : "Moebooru and MyImouto",
    "szurubooru"  : "szurubooru Instances",
    "urlshortener": "URL Shorteners",
    "vichan"      : "vichan Imageboards",
}
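
# Fallback root URLs for instance categories whose extractor does not
# define a root of its own (see build_extractor_list() below).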
URL_MAP = {
    "blogspot" : "https://www.blogger.com/",
    "wikimedia": "https://www.wikimedia.org/",
}

_OAUTH = '<a href="https://github.com/mikf/gallery-dl#oauth">OAuth</a>'
_COOKIES = '<a href="https://github.com/mikf/gallery-dl#cookies">Cookies</a>'
_APIKEY_DB = ('<a href="https://gdl-org.github.io/docs/configuration.html'
              '#extractor-derpibooru-api-key">API Key</a>')
_APIKEY_WH = ('<a href="https://gdl-org.github.io/docs/configuration.html'
              '#extractor-wallhaven-api-key">API Key</a>')
_APIKEY_WY = ('<a href="https://gdl-org.github.io/docs/configuration.html'
              '#extractor-weasyl-api-key">API Key</a>')
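
# Contents of the "Authentication" column: either a plain string or one of
# the HTML links defined above.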
AUTH_MAP = {
    "aibooru"        : "Supported",
    "aryion"         : "Supported",
    "atfbooru"       : "Supported",
    "baraag"         : _OAUTH,
    "bluesky"        : "Supported",
    "booruvar"       : "Supported",
    "coomerparty"    : "Supported",
    "danbooru"       : "Supported",
    "derpibooru"     : _APIKEY_DB,
    "deviantart"     : _OAUTH,
    "e621"           : "Supported",
    "e6ai"           : "Supported",
    "e926"           : "Supported",
    "e-hentai"       : "Supported",
    "exhentai"       : "Supported",
    "fanbox"         : _COOKIES,
    "fantia"         : _COOKIES,
    "flickr"         : _OAUTH,
    "furaffinity"    : _COOKIES,
    "furbooru"       : "API Key",
    "horne"          : "Required",
    "idolcomplex"    : "Supported",
    "imgbb"          : "Supported",
    "inkbunny"       : "Supported",
    "instagram"      : _COOKIES,
    "kemonoparty"    : "Supported",
    "mangadex"       : "Supported",
    "mangoxo"        : "Supported",
    "mastodon.social": _OAUTH,
    "newgrounds"     : "Supported",
    "nijie"          : "Required",
    "patreon"        : _COOKIES,
    "pawoo"          : _OAUTH,
    "pillowfort"     : "Supported",
    "pinterest"      : _COOKIES,
    "pixiv"          : _OAUTH,
    "ponybooru"      : "API Key",
    "reddit"         : _OAUTH,
    "sankaku"        : "Supported",
    "seiga"          : _COOKIES,
    "smugmug"        : _OAUTH,
    "subscribestar"  : "Supported",
    "tapas"          : "Supported",
    "tsumino"        : "Supported",
    "tumblr"         : _OAUTH,
    "twitter"        : "Supported",
    "vipergirls"     : "Supported",
    "wallhaven"      : _APIKEY_WH,
    "weasyl"         : _APIKEY_WY,
    "zerochan"       : "Supported",
}
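
# Utility extractor categories that should not appear in the site table.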
IGNORE_LIST = (
    "directlink",
    "oauth",
    "recursive",
    "test",
    "ytdl",
    "generic",
)


def domain(cls):
    """Return the domain name associated with an extractor class"""
    try:
        url = sys.modules[cls.__module__].__doc__.split()[-1]
        if url.startswith("http"):
            return url
    except Exception:
        pass

    if hasattr(cls, "root") and cls.root:
        return cls.root + "/"

    # reduce the example URL to its scheme and hostname
    url = cls.example
    return url[:url.find("/", 8)+1]


def category_text(c):
    """Return a human-readable representation of a category"""
    return CATEGORY_MAP.get(c) or c.capitalize()


def subcategory_text(bc, c, sc):
    """Return a human-readable representation of a subcategory"""
    if c in SUBCATEGORY_MAP:
        scm = SUBCATEGORY_MAP[c]
        if sc in scm:
            txt = scm[sc]
            if not isinstance(txt, str):
                txt = ", ".join(txt)
            return txt

    if bc and bc in SUBCATEGORY_MAP:
        scm = SUBCATEGORY_MAP[bc]
        if sc in scm:
            txt = scm[sc]
            if not isinstance(txt, str):
                txt = ", ".join(txt)
            return txt

    if sc in SUBCATEGORY_MAP:
        return SUBCATEGORY_MAP[sc]

    # fall back to a capitalized, naively pluralized subcategory name
    sc = sc.capitalize()
    if sc.endswith("y"):
        sc = sc[:-1] + "ies"
    elif not sc.endswith("s"):
        sc += "s"
    return sc


def category_key(c):
    """Generate sorting keys by category"""
    return category_text(c[0]).lower()


def subcategory_key(sc):
    """Generate sorting keys by subcategory"""
    # sort "issue" before all other subcategories
    return "A" if sc == "issue" else sc
2017-07-15 15:01:30 +02:00

def build_extractor_list():
    """Collect and sort extractor (sub)categories and their domains"""
    categories = collections.defaultdict(lambda: collections.defaultdict(list))
    default = categories[""]
    domains = {"": ""}

    for extr in extractor._list_classes():
        category = extr.category
        if category in IGNORE_LIST:
            continue

        if category:
            default[category].append(extr.subcategory)
            if category not in domains:
                domains[category] = domain(extr)
        else:
            base = categories[extr.basecategory]
            if not extr.instances:
                base[""].append(extr.subcategory)
                continue

            for category, root, info in extr.instances:
                base[category].append(extr.subcategory)
                if category not in domains:
                    if not root:
                        if category in URL_MAP:
                            root = URL_MAP[category].rstrip("/")
                        elif results:
                            # use domain from first matching test
                            test = results.category(category)[0]
                            root = test["#class"].from_url(test["#url"]).root
                    domains[category] = root + "/"

    # sort subcategory lists
    for base in categories.values():
        for subcategories in base.values():
            subcategories.sort(key=subcategory_key)

    # add e-hentai.org
    default["e-hentai"] = default["exhentai"]
    domains["e-hentai"] = domains["exhentai"].replace("x", "-")

    # add coomer.party
    default["coomerparty"] = default["kemonoparty"]
    domains["coomerparty"] = domains["kemonoparty"].replace("kemono", "coomer")

    # add hentai-cosplays sister sites (hentai-img, porn-images-xxx)
    default["hentaiimg"] = default["hentaicosplays"]
    domains["hentaiimg"] = "https://hentai-img.com/"
    default["pornimagesxxx"] = default["hentaicosplays"]
    domains["pornimagesxxx"] = "https://porn-images-xxx.com/"

    # add manga4life.com
    default["mangalife"] = default["mangasee"]
    domains["mangalife"] = "https://manga4life.com/"

    # add wikifeetx.com
    default["wikifeetx"] = default["wikifeet"]
    domains["wikifeetx"] = "https://www.wikifeetx.com/"

    return categories, domains
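
# A rough sketch of the returned structure (entries abbreviated and
# illustrative only):
#
#   categories = {
#       "":         {"artstation": ["artwork", "collections", ...], ...},
#       "moebooru": {"yandere": ["pool", "post", ...], ...},
#   }
#   domains = {"": "", "artstation": "https://www.artstation.com/", ...}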


# define table columns
COLUMNS = (
    ("Site", 20,
     lambda bc, c, scs, d: category_text(c)),
    ("URL" , 35,
     lambda bc, c, scs, d: d),
    ("Capabilities", 50,
     lambda bc, c, scs, d: ", ".join(subcategory_text(bc, c, sc) for sc in scs
                                     if subcategory_text(bc, c, sc))),
    ("Authentication", 16,
     lambda bc, c, scs, d: AUTH_MAP.get(c, "")),
)
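
# Each entry is (title, width, content function); the content function
# receives (basecategory, category, subcategories, domain) and returns the
# text for one table cell.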


def generate_output(columns, categories, domains):

    thead = []
    append = thead.append
    append("<tr>")
    for column in columns:
        append("    <th>" + column[0] + "</th>")
    append("</tr>")

    tbody = []
    append = tbody.append
    for bcat, base in categories.items():
        if bcat and base:
            name = BASE_MAP.get(bcat) or (bcat.capitalize() + " Instances")
            append('\n<tr>\n    <td colspan="4"><strong>' +
                   name + '</strong></td>\n</tr>')
            clist = base.items()
        else:
            clist = sorted(base.items(), key=category_key)

        for category, subcategories in clist:
            append("<tr>")
            for column in columns:
                domain = domains[category]
                content = column[2](bcat, category, subcategories, domain)
                append("    <td>" + content + "</td>")
            append("</tr>")

    TEMPLATE = """# Supported Sites

<!-- auto-generated by {} -->
Consider all listed sites to potentially be NSFW.

<table>
<thead valign="bottom">
{}
</thead>
<tbody valign="top">
{}
</tbody>
</table>
"""
    return TEMPLATE.format(
        "/".join(os.path.normpath(__file__).split(os.sep)[-2:]),
        "\n".join(thead),
        "\n".join(tbody),
    )


categories, domains = build_extractor_list()

PATH = (sys.argv[1] if len(sys.argv) > 1 else
        util.path("docs", "supportedsites.md"))
with util.lazy(PATH) as fp:
    fp.write(generate_output(COLUMNS, categories, domains))