# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://cyberdrop.me/"""

from . import lolisafe
from .common import Message
from .. import text


class CyberdropAlbumExtractor(lolisafe.LolisafeAlbumExtractor):
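    # builds on the generic lolisafe album extractor, customizing how
    # album metadata and file objects are retrieved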
    category = "cyberdrop"
    root = "https://cyberdrop.me"
    pattern = r"(?:https?://)?(?:www\.)?cyberdrop\.(?:me|to)/a/([^/?#]+)"
    example = "https://cyberdrop.me/a/ID"

    def items(self):
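        # fetch the album's file objects and metadata, then emit a
        # Directory message followed by one numbered Url message per file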
        files, data = self.fetch_album(self.album_id)

        yield Message.Directory, data
        for data["num"], file in enumerate(files, 1):
            file.update(data)
            text.nameext_from_url(file["name"], file)
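            # the filename ends in a "-<id>" suffix; split it back
            # into "name" and "id"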
file["name"], sep, file["id"] = file["filename"].rpartition("-")
|
|
|
|
yield Message.Url, file["url"], file
|
|
|
|
|
2021-12-21 19:24:17 +01:00
|
|
|
def fetch_album(self, album_id):
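        # scrape the album page; text.extract_from() returns a helper
        # that extracts successive substrings in document order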
        url = "{}/a/{}".format(self.root, album_id)
        page = self.request(url).text
        extr = text.extract_from(page)

        desc = extr('property="og:description" content="', '"')
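        # a generic site slogan in og:description means the album has
        # no description of its own; drop it in that case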
if desc.startswith("A privacy-focused censorship-resistant file "
|
|
|
|
"sharing platform free for everyone."):
|
|
|
|
desc = ""
|
|
|
|
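        # skip ahead to the title element, then read the album's name,
        # size, and creation date from the page header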
extr('id="title"', "")
|
|
|
|
|
|
|
|
album = {
            "album_id"   : self.album_id,
            "album_name" : text.unescape(extr('title="', '"')),
            "album_size" : text.parse_bytes(extr(
                '<p class="title">', "B")),
            "date"       : text.parse_datetime(extr(
                '<p class="title">', '<'), "%d.%m.%Y"),
            "description": text.unescape(text.unescape(  # double
                desc.rpartition(" [R")[0])),
        }

        file_ids = list(text.extract_iter(page, 'id="file" href="/f/', '"'))
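        # "count" is known from the page itself, while the file
        # objects are only fetched lazily by _extract_files()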
        album["count"] = len(file_ids)
        return self._extract_files(file_ids), album

    def _extract_files(self, file_ids):
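        # each file is resolved through the /api/f/<id> endpoint,
        # which returns its metadata (download URL, name, ...) as JSON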
        for file_id in file_ids:
            url = "{}/api/f/{}".format(self.root, file_id)
            yield self.request(url).json()