# -*- coding: utf-8 -*-

# Copyright 2017-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://warosu.org/"""

from .common import Extractor, Message
from .. import text


class WarosuThreadExtractor(Extractor):
    """Extractor for threads on warosu.org"""
    category = "warosu"
    subcategory = "thread"
    root = "https://warosu.org"
    directory_fmt = ("{category}", "{board}", "{thread} - {title}")
    filename_fmt = "{tim}-{filename}.{extension}"
    archive_fmt = "{board}_{thread}_{tim}"
    pattern = r"(?:https?://)?(?:www\.)?warosu\.org/([^/]+)/thread/(\d+)"
    example = "https://warosu.org/a/thread/12345"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.board, self.thread = match.groups()

    def items(self):
        url = "{}/{}/thread/{}".format(self.root, self.board, self.thread)
        page = self.request(url).text
        data = self.metadata(page)
        posts = self.posts(page)

        if not data["title"]:
            # fall back to the first post's comment as title
            data["title"] = text.unescape(text.remove_html(
                posts[0]["com"]))[:50]

        yield Message.Directory, data
        for post in posts:
            if "image" in post:
                # convert numeric fields to integers
                for key in ("w", "h", "no", "time", "tim"):
                    post[key] = text.parse_int(post[key])
                post.update(data)
                yield Message.Url, post["image"], post

    def metadata(self, page):
        """Collect thread metadata"""
        boardname = text.extr(page, "<title>", "</title>")
        title = text.unescape(text.extr(page, "class=filetitle>", "<"))
        return {
            "board"     : self.board,
            "board_name": boardname.split(" - ")[1],
            "thread"    : self.thread,
            "title"     : title,
        }

    def posts(self, page):
        """Build a list of all post objects"""
        page = text.extr(page, "<div class=content", "</form>")
        needle = "<table>"
        return [self.parse(post) for post in page.split(needle)]

    def parse(self, post):
        """Build post object by extracting data from an HTML post"""
        data = self._extract_post(post)
        if "<span class=fileinfo>" in post and self._extract_image(post, data):
            part = data["image"].rpartition("/")[2]
            data["tim"], _, data["extension"] = part.partition(".")
            data["ext"] = "." + data["extension"]
        return data

    def _extract_post(self, post):
        extr = text.extract_from(post)
        return {
            "no"  : extr("id=p", ">"),
            "name": extr("class=postername>", "<").strip(),
            "time": extr("class=posttime title=", "000>"),
            "now" : extr("", "<").strip(),
            "com" : text.unescape(text.remove_html(extr(
                "<blockquote>", "</blockquote>").strip())),
        }

    def _extract_image(self, post, data):
        extr = text.extract_from(post)
        data["fsize"] = extr("<span class=fileinfo> File: ", ", ")
        data["w"] = extr("", "x")
        data["h"] = extr("", ", ")
        data["filename"] = text.unquote(extr(
            "", "<").rstrip().rpartition(".")[0])
        # advance past the "<br>" that follows the file info
        extr("<br>", "")

        url = extr("<a href=", ">")
        if url:
            if url[0] == "/":
                # relative URL; prepend the site root
                data["image"] = self.root + url
            else:
                data["image"] = url
            return True
        return False