# -*- coding: utf-8 -*-

# Copyright 2017-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.flickr.com/"""

from .common import Extractor, Message
from .. import text, oauth, util, exception

BASE_PATTERN = r"(?:https?://)?(?:www\.|secure\.|m\.)?flickr\.com"


class FlickrExtractor(Extractor):
    """Base class for flickr extractors"""
    category = "flickr"
    filename_fmt = "{category}_{id}.{extension}"
    directory_fmt = ("{category}", "{user[username]}")
    archive_fmt = "{id}"
    cookies_domain = None
    request_interval = (1.0, 2.0)
    request_interval_min = 0.5

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.item_id = match.group(1)

    def _init(self):
        self.api = FlickrAPI(self)
        self.user = None

    def items(self):
        data = self.metadata()
        extract = self.api._extract_format
        for photo in self.photos():
            try:
                photo = extract(photo)
            except Exception as exc:
                self.log.warning(
                    "Skipping photo %s (%s: %s)",
                    photo["id"], exc.__class__.__name__, exc)
                self.log.debug("", exc_info=exc)
            else:
                photo.update(data)
                url = photo["url"]
                yield Message.Directory, photo
                yield Message.Url, url, text.nameext_from_url(url, photo)

    def metadata(self):
        """Return general metadata"""
        self.user = self.api.urls_lookupUser(self.item_id)
        return {"user": self.user}

    def photos(self):
        """Return an iterable with all relevant photo objects"""


class FlickrImageExtractor(FlickrExtractor):
    """Extractor for individual images from flickr.com"""
    subcategory = "image"
    pattern = (r"(?:https?://)?(?:"
               r"(?:(?:www\.|secure\.|m\.)?flickr\.com/photos/[^/?#]+/"
               r"|[\w-]+\.static\.?flickr\.com/(?:\d+/)+)(\d+)"
               r"|flic\.kr/p/([A-Za-z1-9]+))")
    example = "https://www.flickr.com/photos/USER/12345"

    def __init__(self, match):
        FlickrExtractor.__init__(self, match)
        if not self.item_id:
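            # flic.kr short links encode the numeric photo ID in base58
            # using this alphabet; illustrative value, not from the
            # original source: util.bdecode("21", alphabet) == 58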
            alphabet = ("123456789abcdefghijkmnopqrstu"
                        "vwxyzABCDEFGHJKLMNPQRSTUVWXYZ")
            self.item_id = util.bdecode(match.group(2), alphabet)

    def items(self):
        photo = self.api.photos_getInfo(self.item_id)

        self.api._extract_metadata(photo)
        if photo["media"] == "video" and self.api.videos:
            self.api._extract_video(photo)
        else:
            self.api._extract_photo(photo)
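
        # the Flickr API wraps most scalar fields as {"_content": value};
        # unwrap them into plain values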
photo["user"] = photo["owner"]
|
2017-06-01 18:14:33 +02:00
|
|
|
photo["title"] = photo["title"]["_content"]
|
2019-05-14 18:12:02 +02:00
|
|
|
photo["comments"] = text.parse_int(photo["comments"]["_content"])
|
|
|
|
photo["description"] = photo["description"]["_content"]
|
2017-06-01 18:14:33 +02:00
|
|
|
photo["tags"] = [t["raw"] for t in photo["tags"]["tag"]]
|
2019-05-14 18:12:02 +02:00
|
|
|
photo["date"] = text.parse_timestamp(photo["dateuploaded"])
|
|
|
|
photo["views"] = text.parse_int(photo["views"])
|
|
|
|
photo["id"] = text.parse_int(photo["id"])
|
2017-06-01 18:14:33 +02:00
|
|
|
|
|
|
|
if "location" in photo:
|
|
|
|
location = photo["location"]
|
|
|
|
for key, value in location.items():
|
|
|
|
if isinstance(value, dict):
|
|
|
|
location[key] = value["_content"]
|
|
|
|
|
2019-05-14 18:12:02 +02:00
|
|
|
url = photo["url"]
|
|
|
|
yield Message.Directory, photo
|
|
|
|
yield Message.Url, url, text.nameext_from_url(url, photo)
|
|
|
|
|


class FlickrAlbumExtractor(FlickrExtractor):
    """Extractor for photo albums from flickr.com"""
    subcategory = "album"
    directory_fmt = ("{category}", "{user[username]}",
                     "Albums", "{album[id]} {album[title]}")
    archive_fmt = "a_{album[id]}_{id}"
    pattern = BASE_PATTERN + r"/photos/([^/?#]+)/(?:album|set)s(?:/(\d+))?"
    example = "https://www.flickr.com/photos/USER/albums/12345"

    def __init__(self, match):
        FlickrExtractor.__init__(self, match)
        self.album_id = match.group(2)

    def items(self):
        if self.album_id:
            return FlickrExtractor.items(self)
        return self._album_items()

    def _album_items(self):
        data = FlickrExtractor.metadata(self)
        data["_extractor"] = FlickrAlbumExtractor
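
        # one Message.Queue per album; "_extractor" lets the frontend
        # dispatch these URLs to FlickrAlbumExtractor without re-matching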
        for album in self.api.photosets_getList(self.user["nsid"]):
            self.api._clean_info(album).update(data)
            url = "https://www.flickr.com/photos/{}/albums/{}".format(
                self.user["path_alias"], album["id"])
            yield Message.Queue, url, album

    def metadata(self):
        data = FlickrExtractor.metadata(self)
        try:
            data["album"] = self.api.photosets_getInfo(
                self.album_id, self.user["nsid"])
        except Exception:
            data["album"] = {}
            self.log.warning("%s: Unable to retrieve album metadata",
                             self.album_id)
        return data

    def photos(self):
        return self.api.photosets_getPhotos(self.album_id)


class FlickrGalleryExtractor(FlickrExtractor):
    """Extractor for photo galleries from flickr.com"""
    subcategory = "gallery"
    directory_fmt = ("{category}", "{user[username]}",
                     "Galleries", "{gallery[gallery_id]} {gallery[title]}")
    archive_fmt = "g_{gallery[id]}_{id}"
    pattern = BASE_PATTERN + r"/photos/([^/?#]+)/galleries/(\d+)"
    example = "https://www.flickr.com/photos/USER/galleries/12345/"

    def __init__(self, match):
        FlickrExtractor.__init__(self, match)
        self.gallery_id = match.group(2)

    def metadata(self):
        data = FlickrExtractor.metadata(self)
        data["gallery"] = self.api.galleries_getInfo(self.gallery_id)
        return data

    def photos(self):
        return self.api.galleries_getPhotos(self.gallery_id)


class FlickrGroupExtractor(FlickrExtractor):
    """Extractor for group pools from flickr.com"""
    subcategory = "group"
    directory_fmt = ("{category}", "Groups", "{group[groupname]}")
    archive_fmt = "G_{group[nsid]}_{id}"
    pattern = BASE_PATTERN + r"/groups/([^/?#]+)"
    example = "https://www.flickr.com/groups/NAME/"

    def metadata(self):
        self.group = self.api.urls_lookupGroup(self.item_id)
        return {"group": self.group}

    def photos(self):
        return self.api.groups_pools_getPhotos(self.group["nsid"])


class FlickrUserExtractor(FlickrExtractor):
    """Extractor for the photostream of a flickr user"""
    subcategory = "user"
    archive_fmt = "u_{user[nsid]}_{id}"
    pattern = BASE_PATTERN + r"/photos/([^/?#]+)/?$"
    example = "https://www.flickr.com/photos/USER/"

    def photos(self):
        return self.api.people_getPhotos(self.user["nsid"])


class FlickrFavoriteExtractor(FlickrExtractor):
    """Extractor for favorite photos of a flickr user"""
    subcategory = "favorite"
    directory_fmt = ("{category}", "{user[username]}", "Favorites")
    archive_fmt = "f_{user[nsid]}_{id}"
    pattern = BASE_PATTERN + r"/photos/([^/?#]+)/favorites"
    example = "https://www.flickr.com/photos/USER/favorites"

    def photos(self):
        return self.api.favorites_getList(self.user["nsid"])


class FlickrSearchExtractor(FlickrExtractor):
    """Extractor for flickr photos based on search results"""
    subcategory = "search"
    directory_fmt = ("{category}", "Search", "{search[text]}")
    archive_fmt = "s_{search}_{id}"
    pattern = BASE_PATTERN + r"/search/?\?([^#]+)"
    example = "https://flickr.com/search/?text=QUERY"

    def __init__(self, match):
        FlickrExtractor.__init__(self, match)
        self.search = text.parse_query(match.group(1))
        if "text" not in self.search:
            self.search["text"] = ""

    def metadata(self):
        return {"search": self.search}

    def photos(self):
        return self.api.photos_search(self.search)


class FlickrAPI(oauth.OAuth1API):
    """Minimal interface for the flickr API

    https://www.flickr.com/services/api/
    """

    API_URL = "https://api.flickr.com/services/rest/"
    API_KEY = "90c368449018a0cb880ea4889cbb8681"
    API_SECRET = "e4b83e319c11e9e1"
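
    # size codes sorted from largest to smallest; each code doubles as the
    # suffix for "url_<code>" extras in API responses (see
    # https://www.flickr.com/services/api/misc.urls.html)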
    FORMATS = [
        ("o" , "Original"    , None),
        ("6k", "X-Large 6K"  , 6144),
        ("5k", "X-Large 5K"  , 5120),
        ("4k", "X-Large 4K"  , 4096),
        ("3k", "X-Large 3K"  , 3072),
        ("k" , "Large 2048"  , 2048),
        ("h" , "Large 1600"  , 1600),
        ("l" , "Large"       , 1024),
        ("c" , "Medium 800"  , 800),
        ("z" , "Medium 640"  , 640),
        ("m" , "Medium"      , 500),
        ("n" , "Small 320"   , 320),
        ("s" , "Small"       , 240),
        ("q" , "Large Square", 150),
        ("t" , "Thumbnail"   , 100),
        ("sq", "Square"      , 75),
    ]
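    # preference ranking for video streams; higher value = better quality,
    # used as the key function in video_getStreamInfo()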
    VIDEO_FORMATS = {
        "orig"       : 9,
        "1080p"      : 8,
        "720p"       : 7,
        "360p"       : 6,
        "288p"       : 5,
        "700"        : 4,
        "300"        : 3,
        "100"        : 2,
        "appletv"    : 1,
        "iphone_wifi": 0,
    }

    def __init__(self, extractor):
        oauth.OAuth1API.__init__(self, extractor)

        self.exif = extractor.config("exif", False)
        self.videos = extractor.config("videos", True)
        self.contexts = extractor.config("contexts", False)
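
        # "size-max" may be a number (pixel limit) or a format code/name
        # from FORMATS, which gets mapped to its pixel width here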
        self.maxsize = extractor.config("size-max")
        if isinstance(self.maxsize, str):
            for fmt, fmtname, fmtwidth in self.FORMATS:
                if self.maxsize == fmt or self.maxsize == fmtname:
                    self.maxsize = fmtwidth
                    break
            else:
                extractor.log.warning(
                    "Could not match '%s' to any format", self.maxsize)
                self.maxsize = None
        if self.maxsize:
            self.formats = [fmt for fmt in self.FORMATS
                            if not fmt[2] or fmt[2] <= self.maxsize]
        else:
            self.formats = self.FORMATS
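        # never request more than 8 "url_<fmt>" extras per API call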
        self.formats = self.formats[:8]

    def favorites_getList(self, user_id):
        """Returns a list of the user's favorite photos."""
        params = {"user_id": user_id}
        return self._pagination("favorites.getList", params)

    def galleries_getInfo(self, gallery_id):
        """Gets information about a gallery."""
        params = {"gallery_id": gallery_id}
        gallery = self._call("galleries.getInfo", params)["gallery"]
        return self._clean_info(gallery)

    def galleries_getPhotos(self, gallery_id):
        """Return the list of photos for a gallery."""
        params = {"gallery_id": gallery_id}
        return self._pagination("galleries.getPhotos", params)

    def groups_pools_getPhotos(self, group_id):
        """Returns a list of pool photos for a given group."""
        params = {"group_id": group_id}
        return self._pagination("groups.pools.getPhotos", params)

    def people_getPhotos(self, user_id):
        """Return photos from the given user's photostream."""
        params = {"user_id": user_id}
        return self._pagination("people.getPhotos", params)

    def photos_getAllContexts(self, photo_id):
        """Returns all visible sets and pools the photo belongs to."""
        params = {"photo_id": photo_id}
        data = self._call("photos.getAllContexts", params)
        del data["stat"]
        return data

    def photos_getExif(self, photo_id):
        """Retrieves a list of EXIF/TIFF/GPS tags for a given photo."""
        params = {"photo_id": photo_id}
        return self._call("photos.getExif", params)["photo"]

    def photos_getInfo(self, photo_id):
        """Get information about a photo."""
        params = {"photo_id": photo_id}
        return self._call("photos.getInfo", params)["photo"]

    def photos_getSizes(self, photo_id):
        """Returns the available sizes for a photo."""
        params = {"photo_id": photo_id}
        sizes = self._call("photos.getSizes", params)["sizes"]["size"]
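        # sizes are listed in ascending order; cut the list at the first
        # entry above "size-max", but always keep at least one size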
        if self.maxsize:
            for index, size in enumerate(sizes):
                if index > 0 and (int(size["width"]) > self.maxsize or
                                  int(size["height"]) > self.maxsize):
                    del sizes[index:]
                    break
        return sizes

    def photos_search(self, params):
        """Return a list of photos matching some criteria."""
        return self._pagination("photos.search", params.copy())

    def photosets_getInfo(self, photoset_id, user_id):
        """Gets information about a photoset."""
        params = {"photoset_id": photoset_id, "user_id": user_id}
        photoset = self._call("photosets.getInfo", params)["photoset"]
        return self._clean_info(photoset)

    def photosets_getList(self, user_id):
        """Returns the photosets belonging to the specified user."""
        params = {"user_id": user_id}
        return self._pagination_sets("photosets.getList", params)

    def photosets_getPhotos(self, photoset_id):
        """Get the list of photos in a set."""
        params = {"photoset_id": photoset_id}
        return self._pagination("photosets.getPhotos", params, "photoset")

    def urls_lookupGroup(self, groupname):
        """Returns a group NSID, given the url to a group's page."""
        params = {"url": "https://www.flickr.com/groups/" + groupname}
        group = self._call("urls.lookupGroup", params)["group"]
        return {"nsid": group["id"],
                "path_alias": groupname,
                "groupname": group["groupname"]["_content"]}

    def urls_lookupUser(self, username):
        """Returns a user NSID, given the url to a user's photos or profile."""
        params = {"url": "https://www.flickr.com/photos/" + username}
        user = self._call("urls.lookupUser", params)["user"]
        return {
            "nsid"      : user["id"],
            "username"  : user["username"]["_content"],
            "path_alias": username,
        }

    def video_getStreamInfo(self, video_id, secret=None):
        """Returns all available video streams"""
        params = {"photo_id": video_id}
        if not secret:
            secret = self._call("photos.getInfo", params)["photo"]["secret"]
        params["secret"] = secret
        stream = self._call("video.getStreamInfo", params)["streams"]["stream"]
        return max(stream, key=lambda s: self.VIDEO_FORMATS.get(s["type"], 0))

    def _call(self, method, params):
        params["method"] = "flickr." + method
        params["format"] = "json"
        params["nojsoncallback"] = "1"
        if self.api_key:
            params["api_key"] = self.api_key
        response = self.request(self.API_URL, params=params)
        try:
            data = response.json()
        except ValueError:
            data = {"code": -1, "message": response.content}
        if "code" in data:
            msg = data.get("message")
            self.log.debug("Server response: %s", data)
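            # Flickr error codes, as handled here: 1 "not found",
            # 2 "permission denied", 98 "invalid auth token",
            # 99 "insufficient permissions"; anything else falls
            # through to the generic failure below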
if data["code"] == 1:
|
2018-12-04 21:23:35 +01:00
|
|
|
raise exception.NotFoundError(self.extractor.subcategory)
|
2024-08-14 09:44:04 +02:00
|
|
|
elif data["code"] == 2:
|
|
|
|
raise exception.AuthorizationError(msg)
|
2018-09-17 10:10:32 +02:00
|
|
|
elif data["code"] == 98:
|
2019-10-28 16:06:36 +01:00
|
|
|
raise exception.AuthenticationError(msg)
|
2018-09-17 10:10:32 +02:00
|
|
|
elif data["code"] == 99:
|
2019-10-28 16:06:36 +01:00
|
|
|
raise exception.AuthorizationError(msg)
|
|
|
|
raise exception.StopExtraction("API request failed: %s", msg)
|
2017-05-30 17:43:02 +02:00
|
|
|
return data
|
2017-06-02 16:35:04 +02:00
|
|
|
|
2019-05-14 18:12:02 +02:00
|
|
|

    def _pagination(self, method, params, key="photos"):
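        # "extras" selects the additional photo fields returned per item;
        # the "metadata" option appends more, plus one "url_<fmt>" entry
        # per requested size format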
extras = ("description,date_upload,tags,views,media,"
|
|
|
|
"path_alias,owner_name,")
|
|
|
|
includes = self.extractor.config("metadata")
|
|
|
|
if includes:
|
|
|
|
if isinstance(includes, (list, tuple)):
|
|
|
|
includes = ",".join(includes)
|
|
|
|
elif not isinstance(includes, str):
|
|
|
|
includes = ("license,date_taken,original_format,last_update,"
|
|
|
|
"geo,machine_tags,o_dims")
|
|
|
|
extras = extras + includes + ","
|
|
|
|
extras += ",".join("url_" + fmt[0] for fmt in self.formats)
|
|
|
|
|
|
|
|
params["extras"] = extras
|
2017-06-02 16:35:04 +02:00
|
|
|
params["page"] = 1
|
|
|
|
|
|
|
|
while True:
|
2019-05-14 18:12:02 +02:00
|
|
|
data = self._call(method, params)[key]
|
2019-08-27 23:26:49 +02:00
|
|
|
yield from data["photo"]
|
2019-05-14 18:12:02 +02:00
|
|
|
if params["page"] >= data["pages"]:
|
|
|
|
return
|
|
|
|
params["page"] += 1
|
2017-06-02 16:35:04 +02:00
|
|
|
|
2019-05-14 18:12:02 +02:00
|
|
|

    def _pagination_sets(self, method, params):
        params["page"] = 1

        while True:
            data = self._call(method, params)["photosets"]
            yield from data["photoset"]
            if params["page"] >= data["pages"]:
                return
            params["page"] += 1

    def _extract_format(self, photo):
        photo["description"] = photo["description"]["_content"].strip()
        photo["views"] = text.parse_int(photo["views"])
        photo["date"] = text.parse_timestamp(photo["dateupload"])
        photo["tags"] = photo["tags"].split()

        self._extract_metadata(photo)
        photo["id"] = text.parse_int(photo["id"])

        if "owner" in photo:
            photo["owner"] = {
                "nsid"      : photo["owner"],
                "username"  : photo["ownername"],
                "path_alias": photo["pathalias"],
            }
        else:
            photo["owner"] = self.extractor.user
        del photo["pathalias"]
        del photo["ownername"]

        if photo["media"] == "video" and self.videos:
            return self._extract_video(photo)
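
        # self.formats is ordered largest to smallest, so the first
        # available "url_<fmt>" within the size limit wins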
        for fmt, fmtname, fmtwidth in self.formats:
            key = "url_" + fmt
            if key in photo:
                photo["width"] = text.parse_int(photo["width_" + fmt])
                photo["height"] = text.parse_int(photo["height_" + fmt])
                if self.maxsize and (photo["width"] > self.maxsize or
                                     photo["height"] > self.maxsize):
                    continue
                photo["url"] = photo[key]
                photo["label"] = fmtname

                # remove excess data
                keys = [
                    key for key in photo
                    if key.startswith(("url_", "width_", "height_"))
                ]
                for key in keys:
                    del photo[key]
                break
        else:
            self._extract_photo(photo)

        return photo

    def _extract_photo(self, photo):
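        # photos.getSizes returns ascending sizes; the last entry is the
        # largest one still allowed by "size-max"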
size = self.photos_getSizes(photo["id"])[-1]
|
|
|
|
photo["url"] = size["source"]
|
|
|
|
photo["label"] = size["label"]
|
|
|
|
photo["width"] = text.parse_int(size["width"])
|
|
|
|
photo["height"] = text.parse_int(size["height"])
|
|
|
|
return photo
|
|
|
|
|
|
|
|
def _extract_video(self, photo):
|
|
|
|
stream = self.video_getStreamInfo(photo["id"], photo.get("secret"))
|
|
|
|
photo["url"] = stream["_content"]
|
|
|
|
photo["label"] = stream["type"]
|
|
|
|
photo["width"] = photo["height"] = 0
|
|
|
|
return photo
|
2018-09-29 16:17:26 +02:00
|
|
|
|
2024-08-14 09:44:04 +02:00
|
|
|

    def _extract_metadata(self, photo):
        if self.exif:
            try:
                photo.update(self.photos_getExif(photo["id"]))
            except Exception as exc:
                self.log.warning(
                    "Unable to retrieve 'exif' data for %s (%s: %s)",
                    photo["id"], exc.__class__.__name__, exc)

        if self.contexts:
            try:
                photo.update(self.photos_getAllContexts(photo["id"]))
            except Exception as exc:
                self.log.warning(
                    "Unable to retrieve 'contexts' data for %s (%s: %s)",
                    photo["id"], exc.__class__.__name__, exc)

    @staticmethod
    def _clean_info(info):
        info["title"] = info["title"]["_content"]
        info["description"] = info["description"]["_content"]
        return info