# -*- coding: utf-8 -*-

# Copyright 2015-2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from https://imgth.com/"""

from .common import Extractor, Message
from .. import text


class ImgthGalleryExtractor(Extractor):
    """Extractor for image galleries from imgth.com"""
    category = "imgth"
    subcategory = "gallery"
    directory_fmt = ("{category}", "{gallery_id} {title}")
    filename_fmt = "{category}_{gallery_id}_{num:>03}.{extension}"
    archive_fmt = "{gallery_id}_{num}"
    pattern = r"(?:https?://)?imgth\.com/gallery/(\d+)"
    test = ("http://imgth.com/gallery/37/wallpaper-anime", {
        "url": "4ae1d281ca2b48952cf5cca57e9914402ad72748",
        "keyword": "6f8c00d6849ea89d1a028764675ec1fe9dbd87e2",
    })

    def __init__(self, match):
|
2019-02-11 13:31:10 +01:00
|
|
|
Extractor.__init__(self, match)
|
2015-10-28 23:26:47 +01:00
|
|
|
self.gid = match.group(1)
|
2019-02-12 10:20:21 +01:00
|
|
|
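        # base URL of the gallery's paginated listing: ".../g/page/<pagenum>"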
        self.url_base = "https://imgth.com/gallery/" + self.gid + "/g/page/"

    def items(self):
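        # page 0 provides both the gallery metadata and the first thumbnails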
        page = self.request(self.url_base + "0").text
        data = self.metadata(page)
        yield Message.Directory, data
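        # "num" is the 1-based index of each image within the gallery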
        for data["num"], url in enumerate(self.images(page), 1):
            yield Message.Url, url, text.nameext_from_url(url, data)

    def images(self, page):
|
2015-12-13 04:36:44 +01:00
|
|
|
"""Yield all image urls for this gallery"""
|
2015-10-28 23:26:47 +01:00
|
|
|
pnum = 0
|
|
|
|
while True:
|
2019-02-12 10:20:21 +01:00
|
|
|
thumbs = text.extract(page, '<ul class="thumbnails">', '</ul>')[0]
|
|
|
|
for url in text.extract_iter(thumbs, '<img src="', '"'):
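                # drop the 24-character thumbnail prefix and rebuild the URL
                # under /images/, which points at the full-size file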
                yield "https://imgth.com/images" + url[24:]
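            # stop when the pager offers no "next" link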
            if '<li class="next">' not in page:
                return
            pnum += 1
            page = self.request(self.url_base + str(pnum)).text

    def metadata(self, page):
        """Collect metadata for extractor-job"""
        return text.extract_all(page, (
            ("title", '<h1>', '</h1>'),
            ("count", 'total of images in this gallery: ', ' '),
            ("date" , 'created on ', ' by <'),
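            # a None key just advances the parse position to the user link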
            (None   , 'href="/users/', ''),
            ("user" , '>', '<'),
        ), values={"gallery_id": self.gid})[0]