# -*- coding: utf-8 -*-

# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from http://www.deviantart.com/"""
|
|
|
|
|
2015-12-06 21:13:57 +01:00
|
|
|
from .common import Extractor, AsynchronousExtractor, Message
|
2015-10-04 04:13:50 +02:00
|
|
|
from .. import text
|
|
|
|
import re
|
|
|
|
|
2015-12-06 21:13:57 +01:00
|
|
|
class DeviantArtUserExtractor(AsynchronousExtractor):
    """Extract all works of an artist on deviantart"""
    category = "deviantart"
    subcategory = "user"
    # one download directory per artist
    directory_fmt = ["{category}", "{artist}"]
    filename_fmt = "{category}_{index}_{title}.{extension}"
    # matches an artist's main page or gallery root,
    # e.g. http://<artist>.deviantart.com/ or .../gallery/
    pattern = [r"(?:https?://)?([^\.]+)\.deviantart\.com(?:/gallery)?/?$"]

    def __init__(self, match):
        """Initialize from a URL pattern match.

        match: re.Match produced by one of the regexes in 'pattern';
        group(1) is the artist's subdomain name.
        """
        AsynchronousExtractor.__init__(self)
        # cookie name suggests this bypasses deviantart's age gate so
        # mature-content galleries are served -- TODO confirm
        self.session.cookies["agegate_state"] = "1"
        self.artist = match.group(1)

    def items(self):
        """Generate extractor messages for every work of the artist."""
        metadata = self.get_job_metadata()
        yield Message.Version, 1
        yield Message.Directory, metadata
        for url, data in self.get_works():
            # merge job-level metadata into each per-image record
            data.update(metadata)
            yield Message.Url, url, data

    def get_works(self):
        """Yield all work-items for a deviantart-artist"""
        url = "http://{}.deviantart.com/gallery/".format(self.artist)
        # catpath=/ selects the whole gallery; 'offset' paginates it
        params = {"catpath": "/", "offset": 0}
        while True:
            page = self.request(url, params=params).text
            # skip ahead to the thumbnail section of the gallery page
            _, pos = text.extract(page, '<div data-dwait-click="GMI.wake"', '')
            while True:
                # each '<a class="thumb' ... '</a>' span is one work
                image_info, pos = text.extract(page, '<a class="thumb', '</a>', pos)
                if not image_info:
                    break
                yield self.get_image_metadata(image_info)
            # NOTE(review): pos == 0 is treated as "no further pages";
            # this relies on text.extract's behavior when the begin
            # marker is never found -- confirm against the text module
            if pos == 0:
                break
            # gallery pages contain 24 works each
            params["offset"] += 24

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        return {
            "category": self.category,
            "artist": self.artist,
        }

    def get_image_metadata(self, image):
        """Collect metadata for an image

        image: the HTML fragment of one '<a class="thumb...' element.
        Returns a (url, data) tuple for a Message.Url.
        """
        # the 'title' attribute looks like
        # '<title> by <artist>, <Mon DD, YYYY> in ...'
        tmatch = self.extract_data(image, 'title',
                                   r'(.+) by (.+), ([A-Z][a-z]{2} \d+, \d{4}) in')
        # the 'href' attribute ends in the work's numeric index
        hmatch = self.extract_data(image, 'href', r'[^"]+-(\d+)')

        # NOTE(review): tmatch.end() is an offset into the extracted
        # 'title' string, not into 'image'; it only serves as a small
        # starting offset for the search below -- confirm intent
        url, pos = text.extract(image, ' data-super-full-img="', '"', tmatch.end())
        if url:
            # full-size image is available directly in the thumbnail
            width , pos = text.extract(image, ' data-super-full-width="', '"', pos)
            height, pos = text.extract(image, ' data-super-full-height="', '"', pos)
        else:
            # fall back to the regular preview image
            url, pos = text.extract(image, ' data-super-img="', '"', pos)
            if url:
                width , pos = text.extract(image, ' data-super-width="', '"', pos)
                height, pos = text.extract(image, ' data-super-height="', '"', pos)
            else:
                # no image data in the thumbnail at all: fetch the
                # work's own page (hmatch.group(0) is the full href)
                page = self.request(hmatch.group(0)).text
                _ , pos = text.extract(page, ' class="dev-content-normal "', '')
                url , pos = text.extract(page, ' src="', '"', pos)
                width , pos = text.extract(page, ' width="', '"', pos)
                height, pos = text.extract(page, ' height="', '"', pos)
        # derive 'name'/'extension' from the image url and attach the
        # parsed attribute data
        return url, text.nameext_from_url(url, {
            "index": hmatch.group(1),
            "title": text.unescape(tmatch.group(1)),
            "artist": tmatch.group(2),
            "date": tmatch.group(3),
            "width": width,
            "height": height,
        })

    @staticmethod
    def extract_data(txt, attr, pattern):
        """Extract a HTML attribute and apply a regex to it

        Returns the re.Match object, or None if the regex fails.
        """
        txt, _ = text.extract(txt, ' %s="' % attr, '"')
        # NOTE(review): if the attribute is missing, text.extract
        # presumably yields None and re.match raises TypeError; the
        # callers do not guard against a None return either -- confirm
        return re.match(pattern, txt)
|
2015-12-06 21:13:57 +01:00
|
|
|
|
|
|
|
|
|
|
|
class DeviantArtImageExtractor(Extractor):
    """Extract a single image from deviantart"""
    category = "deviantart"
    subcategory = "image"
    directory_fmt = ["{category}", "{artist}"]
    filename_fmt = "{category}_{index}_{title}.{extension}"
    # matches a single work's page,
    # e.g. http://<artist>.deviantart.com/art/<title>-<index>
    pattern = [r"(?:https?://)?[^\.]+\.deviantart\.com/art/.+-(\d+)$"]

    def __init__(self, match):
        """Initialize from a URL pattern match.

        match: re.Match from 'pattern'; group(0) is the full page URL,
        group(1) is the work's numeric index.
        """
        Extractor.__init__(self)
        self.url = match.group(0)
        self.index = match.group(1)

    def items(self):
        """Generate the messages for this single image."""
        page = self.request(self.url).text
        # pull metadata from the page's OpenGraph tags plus the
        # posting-time span; extract_all returns (data-dict, pos)
        data = text.extract_all(page, (
            ('title' , '"og:title" content="', '"'),
            ('image' , '"og:image" content="', '"'),
            ('width' , '"og:image:width" content="', '"'),
            ('height', '"og:image:height" content="', '"'),
            ('url' , '"og:url" content="', '"'),
            ('description', '"og:description" content="', '"'),
            ('date' , '<span class="cc-time"><a title="', '"'),
        ), values={'category': self.category, "index": self.index})[0]
        # artist name is the subdomain of the og:url
        data["artist"] = text.extract(data["url"], "//", ".")[0]
        # NOTE(review): pos=8 skips a fixed-length prefix of the raw
        # date string before taking everything up to ', ' -- confirm
        # the expected format of the 'title' attribute on cc-time
        data["date"] = text.extract(data["date"], "", ", ", 8)[0]
        # derive 'name'/'extension' keys from the image url (in place)
        text.nameext_from_url(data["image"], data)
        yield Message.Version, 1
        yield Message.Directory, data
        yield Message.Url, data["image"], data
|