# -*- coding: utf-8 -*-

# Copyright 2022-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://bunkrr.su/"""

from .lolisafe import LolisafeAlbumExtractor
from .. import text
from urllib.parse import urlsplit, urlunsplit

# Hostname replacements for media URLs whose download host does not
# follow the generic "cdn" -> "media-files" substitution applied in
# BunkrAlbumExtractor._extract_files()
MEDIA_DOMAIN_OVERRIDES = {
    "cdn9.bunkr.ru"     : "c9.bunkr.ru",
    "cdn12.bunkr.ru"    : "media-files12.bunkr.la",
    "cdn-pizza.bunkr.ru": "pizza.bunkr.ru",
}
# File extensions served from a dedicated media/download host instead of
# the regular CDN domain; must stay a tuple so it can be passed directly
# to str.endswith()
CDN_HOSTED_EXTENSIONS = (
    ".mp4", ".m4v", ".mov", ".webm", ".mkv", ".ts", ".wmv",  # video
    ".zip", ".rar", ".7z",                                   # archives
)


class BunkrAlbumExtractor(LolisafeAlbumExtractor):
    """Extractor for bunkrr.su albums"""
    category = "bunkr"
    root = "https://bunkrr.su"
    pattern = r"(?:https?://)?(?:app\.)?bunkr+\.(?:la|[sr]u|is|to)/a/([^/?#]+)"
    example = "https://bunkrr.su/a/ID"

    def fetch_album(self, album_id):
        """Fetch an album page and return (files, metadata).

        'files' is the generator produced by _extract_files();
        'metadata' is a dict describing the album itself.
        """
        # album metadata
        page = self.request(self.root + "/a/" + self.album_id).text
        info = text.split_html(text.extr(
            page, "<h1", "</div>").partition(">")[2])
        # info[1] has the form '<count> files (<size>)'; only the size
        # part is used — the file count is taken from len(urls) below.
        # (was: 'count, _, size = info[1].split(None, 2)' with 'count' unused)
        size = info[1].split(None, 2)[2]

        pos = page.index('class="grid-images')
        urls = list(text.extract_iter(page, '<a href="', '"', pos))

        return self._extract_files(urls), {
            "album_id"   : self.album_id,
            "album_name" : text.unescape(info[0]),
            "album_size" : size[1:-1],  # strip surrounding parentheses
            "description": text.unescape(info[2]) if len(info) > 2 else "",
            "count"      : len(urls),
        }

    def _extract_files(self, urls):
        """Yield a {"file": url} dict for each entry in 'urls'.

        Relative URLs point to an intermediate viewer page that must be
        fetched to obtain the real media URL; absolute URLs to CDN-hosted
        file types are rewritten to their download host.
        """
        for url in urls:
            if url.startswith("/"):
                try:
                    page = self.request(self.root + text.unescape(url)).text
                    # '/v/…' viewer pages embed a <source> video tag;
                    # everything else embeds an <img> tag
                    if url[1] == "v":
                        url = text.extr(page, '<source src="', '"')
                    else:
                        url = text.extr(page, '<img src="', '"')
                    if not url:
                        # fix: previously an empty extraction result was
                        # yielded as {"file": ""}; route it through the
                        # existing log-and-skip error path instead
                        raise ValueError("Unable to extract media URL")
                except Exception as exc:
                    self.log.error("%s: %s", exc.__class__.__name__, exc)
                    continue

            else:
                if url.lower().endswith(CDN_HOSTED_EXTENSIONS):
                    scheme, domain, path, query, fragment = urlsplit(url)
                    if domain in MEDIA_DOMAIN_OVERRIDES:
                        domain = MEDIA_DOMAIN_OVERRIDES[domain]
                    else:
                        # default mapping: 'cdnN.…' -> 'media-filesN.…'
                        domain = domain.replace("cdn", "media-files", 1)
                    url = urlunsplit((scheme, domain, path, query, fragment))

            yield {"file": text.unescape(url)}