# Source: gallery-dl — gallery_dl/extractor/2chan.py
# (mirror of https://github.com/mikf/gallery-dl.git)
# -*- coding: utf-8 -*-
# Copyright 2017-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://www.2chan.net/"""
from .common import Extractor, Message
from .. import text
class _2chanThreadExtractor(Extractor):
    """Extractor for 2chan threads"""
    category = "2chan"
    subcategory = "thread"
    directory_fmt = ("{category}", "{board_name}", "{thread}")
    filename_fmt = "{tim}.{extension}"
    archive_fmt = "{board}_{thread}_{tim}"
    # Image URLs follow /<board>/src/<tim>.<extension>; 'tim' and
    # 'extension' are guaranteed to exist in every post dict that reaches
    # format_map() below (they are set in parse() whenever a file link is
    # found, and items() skips posts without "filename").
    # NOTE(review): the scraped copy had "(unknown)" here — reconstructed
    # from filename_fmt and the keys produced by parse().
    url_fmt = "https://{server}.2chan.net/{board}/src/{tim}.{extension}"
    pattern = r"(?:https?://)?([\w-]+)\.2chan\.net/([^/?#]+)/res/(\d+)"
    example = "https://dec.2chan.net/12/res/12345.htm"

    def __init__(self, match):
        Extractor.__init__(self, match)
        # Subdomain (server), board name, and thread number from `pattern`
        self.server, self.board, self.thread = match.groups()

    def items(self):
        """Yield a Directory message, then one Url message per file post"""
        url = "https://{}.2chan.net/{}/res/{}.htm".format(
            self.server, self.board, self.thread)
        page = self.request(url).text
        data = self.metadata(page)

        yield Message.Directory, data
        for post in self.posts(page):
            if "filename" not in post:
                continue  # text-only post; nothing to download
            post.update(data)
            url = self.url_fmt.format_map(post)
            yield Message.Url, url, post

    def metadata(self, page):
        """Collect metadata for extractor-job"""
        # <title> looks like "<thread title> - <board name><suffix>";
        # rpartition() keeps everything before the last " - " as the title
        title, _, boardname = text.extr(
            page, "<title>", "</title>").rpartition(" - ")
        return {
            "server": self.server,
            "title": title,
            "board": self.board,
            # drop a fixed 4-character suffix from the board-name part
            # (presumably a site-wide trailer in <title> — TODO confirm)
            "board_name": boardname[:-4],
            "thread": self.thread,
        }

    def posts(self, page):
        """Build a list of all post-objects"""
        # Restrict parsing to the thread container, then split it into
        # individual posts at each reply table
        page = text.extr(
            page, '<div class="thre"', '<div style="clear:left"></div>')
        return [
            self.parse(post)
            for post in page.split('<table border=0>')
        ]

    def parse(self, post):
        """Build post-object by extracting data from an HTML post"""
        data = self._extract_post(post)
        if data["name"]:
            data["name"] = data["name"].strip()
        path = text.extr(post, '<a href="/', '"')
        # "bin/jump" links are redirects, not file attachments
        if path and not path.startswith("bin/jump"):
            self._extract_image(post, data)
            # filename is "<tim>.<extension>"; tim is a millisecond
            # timestamp, so dropping its last 3 digits yields seconds
            data["tim"], _, data["extension"] = data["filename"].partition(".")
            data["time"] = data["tim"][:-3]
            data["ext"] = "." + data["extension"]
        return data

    @staticmethod
    def _extract_post(post):
        """Extract header fields and comment body from one post's HTML"""
        return text.extract_all(post, (
            ("post", 'class="csb">' , '<'),
            ("name", 'class="cnm">' , '<'),
            ("now" , 'class="cnw">' , '<'),
            ("no"  , 'class="cno">No.', '<'),
            (None  , '<blockquote', ''),
            ("com" , '>', '</blockquote>'),
        ))[0]

    @staticmethod
    def _extract_image(post, data):
        """Add filename and file size of an attached image to 'data'"""
        text.extract_all(post, (
            (None      , '_blank', ''),
            ("filename", '>', '<'),
            ("fsize"   , '(', ' '),
        ), 0, data)