# -*- coding: utf-8 -*-
# Copyright 2017-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://warosu.org/"""

from .common import Extractor, Message
from .. import text


class WarosuThreadExtractor(Extractor):
    """Extractor for threads on warosu.org"""
    category = "warosu"
    subcategory = "thread"
    root = "https://warosu.org"
    directory_fmt = ("{category}", "{board}", "{thread} - {title}")
    filename_fmt = "{tim}-{filename}.{extension}"
    archive_fmt = "{board}_{thread}_{tim}"
    pattern = r"(?:https?://)?(?:www\.)?warosu\.org/([^/]+)/thread/(\d+)"
    example = "https://warosu.org/a/thread/12345"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.board, self.thread = match.groups()

    def items(self):
        url = "{}/{}/thread/{}".format(self.root, self.board, self.thread)
        page = self.request(url).text
        data = self.metadata(page)
        posts = self.posts(page)
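        # if the page provides no thread title, fall back to the first
        # post's comment text, truncated to 50 characters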
        if not data["title"]:
            data["title"] = text.unescape(text.remove_html(
                posts[0]["com"]))[:50]

        yield Message.Directory, data
        for post in posts:
            if "image" in post:
                for key in ("w", "h", "no", "time", "tim"):
                    post[key] = text.parse_int(post[key])
                post.update(data)
                yield Message.Url, post["image"], post

    def metadata(self, page):
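        # the board name is the last " - "-separated part of the page <title>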
        boardname = text.extr(page, "<title>", "</title>")
        title = text.unescape(text.extr(page, "class=filetitle>", "<"))
        return {
            "board"     : self.board,
            "board_name": boardname.rpartition(" - ")[2],
            "thread"    : self.thread,
            "title"     : title,
        }

    def posts(self, page):
        """Build a list of all post objects"""
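        # split the thread markup on "<table>" so that each chunk
        # contains a single post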
        page = text.extr(page, "<div class=content", "</form>")
        needle = "<table>"
        return [self.parse(post) for post in page.split(needle)]

    def parse(self, post):
        """Build post object by extracting data from an HTML post"""
        data = self._extract_post(post)
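        # posts with a file attachment carry a "<span> File:" marker;
        # "tim" and the extension are taken from the server-side filename
        # of the image URL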
if "<span> File:" in post:
2017-08-18 19:52:58 +02:00
self._extract_image(post, data)
part = data["image"].rpartition("/")[2]
data["tim"], _, data["extension"] = part.partition(".")
data["ext"] = "." + data["extension"]
return data
2023-10-13 23:03:39 +02:00

    def _extract_post(self, post):
        extr = text.extract_from(post)
        return {
            "no"  : extr("id=p", ">"),
            "name": extr("class=postername>", "<").strip(),
            "time": extr("class=posttime title=", "000>"),
            "now" : extr("", "<").strip(),
            "com" : text.unescape(text.remove_html(extr(
                "<blockquote>", "</blockquote>").strip())),
        }

    def _extract_image(self, post, data):
        extr = text.extract_from(post)
        data["fsize"] = extr("<span> File: ", ", ")
        data["w"] = extr("", "x")
        data["h"] = extr("", ", ")
        data["filename"] = text.unquote(extr(
            "", "<").rstrip().rpartition(".")[0])
extr("<br>", "")
data["image"] = self.root + extr("<a href=", ">")