# -*- coding: utf-8 -*-

# Copyright 2017-2022 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.2chan.net/"""

from .common import Extractor, Message
from .. import text


class _2chanThreadExtractor(Extractor):
    """Extractor for 2chan threads"""
    category = "2chan"
    subcategory = "thread"
    directory_fmt = ("{category}", "{board_name}", "{thread}")
    filename_fmt = "{tim}.{extension}"
    archive_fmt = "{board}_{thread}_{tim}"
    url_fmt = "https://{server}.2chan.net/{board}/src/{filename}"
pattern = r"(?:https?://)?([\w-]+)\.2chan\.net/([^/]+)/res/(\d+)"
test = ("https://dec.2chan.net/70/res/14565.htm", {
"pattern": r"https://dec\.2chan\.net/70/src/\d{13}\.jpg",
"count": ">= 3",
"keyword": {
"board": "70",
"board_name": "新板提案",
"com": str,
"fsize": r"re:\d+",
"name": "名無し",
"no": r"re:1[45]\d\d\d",
"now": r"re:22/../..\(.\)..:..:..",
"post": "無題",
"server": "dec",
"thread": "14565",
"tim": r"re:^\d{13}$",
"time": r"re:^\d{10}$",
"title": "ヒロアカ板"
},
})

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.server, self.board, self.thread = match.groups()

    def items(self):
        url = "https://{}.2chan.net/{}/res/{}.htm".format(
            self.server, self.board, self.thread)
        page = self.request(url).text
        data = self.metadata(page)

        yield Message.Directory, data
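        # text-only posts never receive a "filename" key in parse()
        # and are skipped here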
        for post in self.posts(page):
            if "filename" not in post:
                continue
            post.update(data)
            url = self.url_fmt.format_map(post)
            yield Message.Url, url, post

    def metadata(self, page):
        """Collect metadata for extractor-job"""
        title = text.extract(page, "<title>", "</title>")[0]
        title, _, boardname = title.rpartition(" - ")
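        # page titles appear to follow the form
        # "<thread title> - <board name>＠ふたば"; the [:-4] below
        # strips the 4-character site suffix from the board name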
        return {
            "server": self.server,
            "title": title,
            "board": self.board,
            "board_name": boardname[:-4],
            "thread": self.thread,
        }

    def posts(self, page):
        """Build a list of all post-objects"""
        page = text.extract(
            page, '<div class="thre"', '<div style="clear:left"></div>')[0]
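        # the opening post and each reply are separated by
        # '<table border=0>' markers inside the thread container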
        return [
            self.parse(post)
            for post in page.split('<table border=0>')
        ]

    def parse(self, post):
        """Build post-object by extracting data from an HTML post"""
        data = self._extract_post(post)
        if data["name"]:
            data["name"] = data["name"].strip()
        path = text.extract(post, '<a href="/', '"')[0]
        if path and not path.startswith("bin/jump"):
            self._extract_image(post, data)
            data["tim"], _, data["extension"] = data["filename"].partition(".")
data["time"] = data["tim"][:-3]
data["ext"] = "." + data["extension"]
return data

    @staticmethod
    def _extract_post(post):
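        # the markers correspond to 2chan's CSS classes:
        # csb = subject, cnm = name, cnw = date string, cno = post
        # number; the <blockquote> holds the comment body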
        return text.extract_all(post, (
            ("post", 'class="csb">' , '<'),
            ("name", 'class="cnm">' , '<'),
            ("now" , 'class="cnw">' , '<'),
            ("no"  , 'class="cno">No.', '<'),
            (None  , '<blockquote', ''),
            ("com" , '>', '</blockquote>'),
        ))[0]

    @staticmethod
    def _extract_image(post, data):
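        # skip ahead to the target="_blank" link wrapping the image;
        # its link text appears to hold the filename, followed by the
        # file size in parentheses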
        text.extract_all(post, (
            (None      , '_blank', ''),
            ("filename", '>', '<'),
            ("fsize"   , '(', ' '),
        ), 0, data)