mirror of https://github.com/instaloader/instaloader.git synced 2024-09-11 16:22:24 +02:00

Adapt new graphql layout used by Instagram

Fixes #19.
André Koch-Kramer 2017-04-21 18:01:20 +02:00
parent 6b345e1f52
commit 361445519a
2 changed files with 53 additions and 27 deletions

File 1 of 2: documentation (reStructuredText)

@@ -55,7 +55,7 @@ You may also download the most recent pictures with one hashtag:

 ::

-    instaloader #hashtag
+    instaloader "#hashtag"

 Instaloader can also be used to **download private profiles**. To do so,
 invoke it with
@@ -121,7 +121,8 @@ You could also download your last 20 liked pics with

 .. code:: python

     instaloader.download_feed_pics(session, max_count=20, fast_update=True,
-                                   filter_func=lambda node: not node["likes"]["viewer_has_liked"])
+                                   filter_func=lambda node:
+                                   not node["likes"]["viewer_has_liked"] if "likes" in node else not node["viewer_has_liked"])

 To download the last 20 pictures with hashtag #cat, do
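The updated filter_func has to cope with both response layouts: older feed nodes nest the liked flag under "likes", while the new GraphQL nodes expose viewer_has_liked at the top level. A minimal, self-contained sketch of the same dispatch logic; the two node dicts are hypothetical, trimmed down to the one relevant key:

# Hypothetical, trimmed-down feed nodes; only the liked flag is shown.
old_node = {"likes": {"viewer_has_liked": True}}  # pre-GraphQL layout
new_node = {"viewer_has_liked": True}             # new GraphQL layout

def skip_liked(node):
    # Same dispatch as the filter_func above: the old layout nests the
    # flag under "likes", the new layout exposes it at the top level.
    if "likes" in node:
        return not node["likes"]["viewer_has_liked"]
    return not node["viewer_has_liked"]

assert skip_liked(old_node) is False
assert skip_liked(new_node) is False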

File 2 of 2: Python source

@@ -84,7 +84,7 @@ def get_username_by_id(session: requests.Session, profile_id: int) -> str:
     """To get the current username of a profile, given its unique ID, this function can be used.
     session is required to be a logged-in (i.e. non-anonymous) session."""
     tempsession = copy_session(session)
-    tempsession.headers.update({'Content-Type' : 'application/json'})
+    tempsession.headers.update({'Content-Type' : 'application/x-www-form-urlencoded'})
     resp = tempsession.post('https://www.instagram.com/query/', data='q=ig_user(' +
                             str(profile_id) +')+%7B%0A++username%0A%7D%0A')
     if resp.status_code == 200:
@@ -147,7 +147,7 @@ def get_followees(profile: str, session: requests.Session) -> List[Dict[str, Any
              "&ref=relationships%3A%3Afollow_list"]
     tmpsession.headers.update(default_http_header())
     tmpsession.headers.update({'Referer' : 'https://www.instagram.com/'+profile+'/following/'})
-    tmpsession.headers.update({'Content-Type' : 'application/json'})
+    tmpsession.headers.update({'Content-Type' : 'application/x-www-form-urlencoded'})
     resp = tmpsession.post('https://www.instagram.com/query/', data=query[0]+"first("+query[1])
     if resp.status_code == 200:
         data = json.loads(resp.text)
@@ -431,7 +431,7 @@ def get_feed_json(session: requests.Session, end_cursor: str = None, sleep: bool
             "profile_pic_url%2C%0A++username%0A%7D%0A&ref=feed::show"
     tmpsession.headers.update(default_http_header())
     tmpsession.headers.update({'Referer' : 'https://www.instagram.com/'})
-    tmpsession.headers.update({'Content-Type' : 'application/json'})
+    tmpsession.headers.update({'Content-Type' : 'application/x-www-form-urlencoded'})
     resp = tmpsession.post('https://www.instagram.com/query/', data=query)
     if sleep:
         time.sleep(4 * random.random() + 1)
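The three hunks above all make the same fix: the body POSTed to /query/ is a raw form-encoded string ("q=..."), not JSON, so the Content-Type header has to say application/x-www-form-urlencoded for the server to parse it. A hedged sketch of the request pattern; the profile id is illustrative, not a real one:

import requests

session = requests.Session()
# requests does not set a Content-Type when `data` is a raw string,
# so it must be declared explicitly; the body is form-encoded, not JSON.
session.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
resp = session.post('https://www.instagram.com/query/',
                    data='q=ig_user(42)+%7B%0A++username%0A%7D%0A')  # 42 is a made-up profile id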
@@ -463,36 +463,51 @@ def download_node(node: Dict[str, Any], session: requests.Session, name: str,
     :param quiet: Suppress output
     :return: True if something was downloaded, False otherwise, i.e. file was already there
     """
-    if '__typename' in node and node['__typename'] == 'GraphSidecar':
-        sidecar_data = session.get('https://www.instagram.com/p/' + node['code'] + '/', params={'__a': 1}).json()
-        edge_number = 1
-        downloaded = False
-        for edge in sidecar_data['media']['edge_sidecar_to_children']['edges']:
-            edge_downloaded = download_pic(name, edge['node']['display_url'], node['date'],
-                                           filename_suffix=str(edge_number), quiet=quiet,
-                                           outputlabel=(str(edge_number) if edge_number != 1 else None))
-            downloaded = downloaded or edge_downloaded
-            edge_number += 1
-            if sleep:
-                time.sleep(1.75 * random.random() + 0.25)
+    # pylint:disable=too-many-branches,too-many-locals
+    date = node["date"] if "date" in node else node["taken_at_timestamp"]
+    if '__typename' in node:
+        if node['__typename'] == 'GraphSidecar':
+            sidecar_data = session.get('https://www.instagram.com/p/' + node['code'] + '/', params={'__a': 1}).json()
+            edge_number = 1
+            downloaded = False
+            for edge in sidecar_data['media']['edge_sidecar_to_children']['edges']:
+                edge_downloaded = download_pic(name, edge['node']['display_url'], date,
+                                               filename_suffix=str(edge_number), quiet=quiet,
+                                               outputlabel=(str(edge_number) if edge_number != 1 else None))
+                downloaded = downloaded or edge_downloaded
+                edge_number += 1
+                if sleep:
+                    time.sleep(1.75 * random.random() + 0.25)
+        elif node['__typename'] == 'GraphImage':
+            downloaded = download_pic(name, node["display_url"], date, quiet=quiet)
+            if sleep:
+                time.sleep(1.75 * random.random() + 0.25)
+        else:
+            _log("Warning: Unknown typename discovered:" + node['__typename'])
+            downloaded = False
     else:
-        # Node is image or video.
-        downloaded = download_pic(name, node["display_src"], node["date"], quiet=quiet)
+        # Node is an old image or video.
+        downloaded = download_pic(name, node["display_src"], date, quiet=quiet)
         if sleep:
             time.sleep(1.75 * random.random() + 0.25)
-    if "caption" in node:
-        save_caption(name, node["date"], node["caption"], shorter_output, quiet)
+    if "edge_media_to_caption" in node and node["edge_media_to_caption"]["edges"]:
+        save_caption(name, date, node["edge_media_to_caption"]["edges"][0]["node"]["text"], shorter_output, quiet)
+    elif "caption" in node:
+        save_caption(name, date, node["caption"], shorter_output, quiet)
     else:
         _log("<no caption>", end=' ', flush=True, quiet=quiet)
     if node["is_video"] and download_videos:
-        video_data = get_json('p/' + node["code"], session, sleep=sleep)
+        if "shortcode" in node:
+            video_data = get_json('p/' + node["shortcode"], session, sleep=sleep)
+        else:
+            video_data = get_json('p/' + node["code"], session, sleep=sleep)
         download_pic(name,
                      video_data['entry_data']['PostPage'][0]['graphql']['shortcode_media']['video_url'],
-                     node["date"], 'mp4', quiet=quiet)
+                     date, 'mp4', quiet=quiet)
     if geotags:
         location = get_location(node, session, sleep)
         if location:
-            save_location(name, location, node["date"])
+            save_location(name, location, date)
     _log(quiet=quiet)
     return downloaded
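The rewrite above normalizes three fields that moved in the new GraphQL layout: the timestamp ("date" vs. "taken_at_timestamp"), the image URL ("display_src" vs. "display_url"), and the caption (a plain "caption" string vs. the "edge_media_to_caption" edge list). A self-contained sketch of that normalization, using hypothetical trimmed-down nodes with made-up values:

# Hypothetical nodes; all values are made up for illustration.
old_node = {"date": 1492790480, "caption": "a cat",
            "display_src": "https://example.com/old.jpg"}
new_node = {"taken_at_timestamp": 1492790480,
            "display_url": "https://example.com/new.jpg",
            "edge_media_to_caption": {"edges": [{"node": {"text": "a cat"}}]}}

for node in (old_node, new_node):
    # Timestamp: old nodes carry "date", new ones "taken_at_timestamp".
    date = node["date"] if "date" in node else node["taken_at_timestamp"]
    # Caption: prefer the new edge list, fall back to the old plain field.
    if "edge_media_to_caption" in node and node["edge_media_to_caption"]["edges"]:
        caption = node["edge_media_to_caption"]["edges"][0]["node"]["text"]
    elif "caption" in node:
        caption = node["caption"]
    else:
        caption = None
    assert date == 1492790480 and caption == "a cat"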
@@ -506,7 +521,8 @@ def download_feed_pics(session: requests.Session, max_count: int = None, fast_up
     Example to download up to the 20 pics the user last liked:
     >>> download_feed_pics(load_session('USER'), max_count=20, fast_update=True,
-    >>>                    filter_func=lambda node: not node["likes"]["viewer_has_liked"])
+    >>>                    filter_func=lambda node:
+    >>>                    not node["likes"]["viewer_has_liked"] if "likes" in node else not node["viewer_has_liked"])

     :param session: Session belonging to a user, i.e. not an anonymous session
     :param max_count: Maximum count of pictures to download
@@ -518,12 +534,20 @@ def download_feed_pics(session: requests.Session, max_count: int = None, fast_up
     :param sleep: Sleep between requests to instagram server
     :param quiet: Suppress output
     """
+    # pylint:disable=too-many-locals
     data = get_feed_json(session, sleep=sleep)
     count = 1
-    while data["feed"]["media"]["page_info"]["has_next_page"]:
-        for node in data["feed"]["media"]["nodes"]:
+    while True:
+        if "graphql" in data:
+            is_edge = True
+            feed = data["graphql"]["user"]["edge_web_feed_timeline"]
+        else:
+            is_edge = False
+            feed = data["feed"]["media"]
+        for edge_or_node in feed["edges"] if is_edge else feed["nodes"]:
             if max_count is not None and count > max_count:
                 return
+            node = edge_or_node["node"] if is_edge else edge_or_node
             name = node["owner"]["username"]
             if filter_func is not None and filter_func(node):
                 _log("<pic by %s skipped>" % name, flush=True, quiet=quiet)
@@ -535,8 +559,9 @@ def download_feed_pics(session: requests.Session, max_count: int = None, fast_up
                                        sleep=sleep, shorter_output=shorter_output, quiet=quiet)
             if fast_update and not downloaded:
                 return
-        data = get_feed_json(session, end_cursor=data["feed"]["media"]["page_info"]["end_cursor"],
-                             sleep=sleep)
+        if not feed["page_info"]["has_next_page"]:
+            break
+        data = get_feed_json(session, end_cursor=feed["page_info"]["end_cursor"], sleep=sleep)


 def get_hashtag_json(hashtag: str, session: requests.Session,
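The pagination rewrite replaces the old `while has_next_page` head condition with a `while True` loop that detects the layout on every page and checks page_info at the bottom, so the final page (whose has_next_page is false) is still processed before the loop exits. A runnable sketch of the same pattern against two stubbed feed pages; the page contents and the stub function are invented for illustration:

# Canned pages: first in the new GraphQL "edges" layout, then old "nodes".
pages = [
    {"graphql": {"user": {"edge_web_feed_timeline": {
        "edges": [{"node": {"id": 1}}, {"node": {"id": 2}}],
        "page_info": {"has_next_page": True, "end_cursor": "abc"}}}}},
    {"feed": {"media": {
        "nodes": [{"id": 3}],
        "page_info": {"has_next_page": False, "end_cursor": None}}}},
]

def get_feed_json_stub(end_cursor=None):
    # Stand-in for get_feed_json(); returns the next canned page.
    return pages[0] if end_cursor is None else pages[1]

data = get_feed_json_stub()
seen = []
while True:
    if "graphql" in data:
        is_edge = True
        feed = data["graphql"]["user"]["edge_web_feed_timeline"]
    else:
        is_edge = False
        feed = data["feed"]["media"]
    for edge_or_node in feed["edges"] if is_edge else feed["nodes"]:
        node = edge_or_node["node"] if is_edge else edge_or_node
        seen.append(node["id"])
    if not feed["page_info"]["has_next_page"]:
        break
    data = get_feed_json_stub(end_cursor=feed["page_info"]["end_cursor"])

assert seen == [1, 2, 3]  # both layouts paginated by one loop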