#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ricardo Garcia Gonzalez
# Author: Danny Colligan
# Author: Benjamin Johnson
# Author: Vasyl' Vavrychuk
# License: Public domain code
import cookielib
import datetime
import htmlentitydefs
import httplib
import locale
import math
import netrc
import os
import os.path
import re
import socket
import string
import subprocess
import sys
import time
import urllib
import urllib2

# parse_qs was moved from the cgi module to the urlparse module recently.
try:
	from urlparse import parse_qs
except ImportError:
	from cgi import parse_qs

std_headers = {
	'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101028 Firefox/3.6.12',
	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
	'Accept-Language': 'en-us,en;q=0.5',
}
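
# These defaults mimic a contemporary Firefox 3.6 on Linux; most of the
# urllib2.Request objects built below pass std_headers as their header dict.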

simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')


def preferredencoding():
	"""Get preferred encoding.

	Returns the best encoding scheme for the system, based on
	locale.getpreferredencoding() and some further tweaks.
	"""
	def yield_preferredencoding():
		try:
			pref = locale.getpreferredencoding()
			u'TEST'.encode(pref)
		except:
			pref = 'UTF-8'
		while True:
			yield pref
	return yield_preferredencoding().next()


def htmlentity_transform(matchobj):
	"""Transforms an HTML entity to a Unicode character.

	This function receives a match object and is intended to be used with
	the re.sub() function.
	"""
	entity = matchobj.group(1)

	# Known non-numeric HTML entity
	if entity in htmlentitydefs.name2codepoint:
		return unichr(htmlentitydefs.name2codepoint[entity])

	# Unicode character
	mobj = re.match(ur'(?u)#(x?\d+)', entity)
	if mobj is not None:
		numstr = mobj.group(1)
		if numstr.startswith(u'x'):
			base = 16
			numstr = u'0%s' % numstr
		else:
			base = 10
		return unichr(long(numstr, base))

	# Unknown entity in name, return its literal representation
	return (u'&%s;' % entity)
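
# For instance, a match on u'&#65;' or u'&#x41;' yields u'A', while an unknown
# name such as u'&foobar;' is returned verbatim. Note that the numeric branch
# only accepts decimal digits after the optional 'x', so hex entities that
# contain letters (e.g. u'&#xE9;') fall through to the literal branch.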


def sanitize_title(utitle):
	"""Sanitizes a video title so it can be used as part of a filename."""
	utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
	return utitle.replace(unicode(os.sep), u'%')
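
# A quick illustration (hypothetical title, POSIX os.sep assumed):
#
#   sanitize_title(u'Foo &amp; Bar/Baz')  ->  u'Foo & Bar%Baz'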


def sanitize_open(filename, open_mode):
	"""Try to open the given filename, and slightly tweak it if this fails.

	Attempts to open the given filename. If this fails, it tries to change
	the filename slightly, step by step, until it's either able to open it
	or it fails and raises a final exception, like the standard open()
	function.

	It returns the tuple (stream, definitive_file_name).
	"""
	try:
		if filename == u'-':
			if sys.platform == 'win32':
				import msvcrt
				msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
			return (sys.stdout, filename)
		stream = open(filename, open_mode)
		return (stream, filename)
	except (IOError, OSError), err:
		# In case of error, try to remove win32 forbidden chars
		filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)

		# An exception here should be caught in the caller
		stream = open(filename, open_mode)
		return (stream, filename)


class DownloadError(Exception):
	"""Download Error exception.

	This exception may be thrown by FileDownloader objects if they are not
	configured to continue on errors. They will contain the appropriate
	error message.
	"""
	pass


class SameFileError(Exception):
	"""Same File exception.

	This exception will be thrown by FileDownloader objects if they detect
	multiple files would have to be downloaded to the same file on disk.
	"""
	pass


class PostProcessingError(Exception):
	"""Post Processing exception.

	This exception may be raised by PostProcessor's .run() method to
	indicate an error in the postprocessing task.
	"""
	pass


class UnavailableVideoError(Exception):
	"""Unavailable Format exception.

	This exception will be thrown when a video is requested
	in a format that is not available for that video.
	"""
	pass


class ContentTooShortError(Exception):
	"""Content Too Short exception.

	This exception may be raised by FileDownloader objects when a file they
	download is too small for what the server announced first, indicating
	the connection was probably interrupted.
	"""
	# Both in bytes
	downloaded = None
	expected = None

	def __init__(self, downloaded, expected):
		self.downloaded = downloaded
		self.expected = expected


class FileDownloader(object):
	"""File Downloader class.

	File downloader objects are the ones responsible for downloading the
	actual video file and writing it to disk if the user has requested
	it, among some other tasks. In most cases there should be one per
	program. Given a video URL, the downloader doesn't know how to
	extract all the needed information (that is the task of the
	InfoExtractors), so it has to pass the URL to one of them.

	For this, file downloader objects have a method that allows
	InfoExtractors to be registered in a given order. When it is passed
	a URL, the file downloader hands it to the first InfoExtractor it
	finds that reports being able to handle it. The InfoExtractor extracts
	all the information about the video or videos the URL refers to, and
	asks the FileDownloader to process the video information, possibly
	downloading the video.

	File downloaders accept a lot of parameters. In order not to saturate
	the object constructor with arguments, it receives a dictionary of
	options instead. These options are available through the params
	attribute for the InfoExtractors to use. The FileDownloader also
	registers itself as the downloader in charge for the InfoExtractors
	that are added to it, so this is a "mutual registration".

	Available options:

	username:         Username for authentication purposes.
	password:         Password for authentication purposes.
	usenetrc:         Use netrc for authentication instead.
	quiet:            Do not print messages to stdout.
	forceurl:         Force printing final URL.
	forcetitle:       Force printing title.
	forcethumbnail:   Force printing thumbnail URL.
	forcedescription: Force printing description.
	simulate:         Do not download the video files.
	format:           Video format code.
	format_limit:     Highest quality format to try.
	outtmpl:          Template for output names.
	ignoreerrors:     Do not stop on download errors.
	ratelimit:        Download speed limit, in bytes/sec.
	nooverwrites:     Prevent overwriting files.
	retries:          Number of times to retry for HTTP error 5xx.
	continuedl:       Try to continue downloads if possible.
	noprogress:       Do not print the progress bar.
	playliststart:    Playlist item to start at.
	playlistend:      Playlist item to end at.
	logtostderr:      Log messages to stderr instead of stdout.
	"""

	params = None
	_ies = []
	_pps = []
	_download_retcode = None
	_num_downloads = None
	_screen_file = None

	def __init__(self, params):
		"""Create a FileDownloader object with the given options."""
		self._ies = []
		self._pps = []
		self._download_retcode = 0
		self._num_downloads = 0
		self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
		self.params = params

	@staticmethod
	def pmkdir(filename):
		"""Create directory components in filename. Similar to Unix "mkdir -p"."""
		components = filename.split(os.sep)
		aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
		aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
		for dir in aggregate:
			if not os.path.exists(dir):
				os.mkdir(dir)
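
	# Note: this creates every directory component of the path except the
	# final one (the file itself), so it behaves roughly like
	# os.makedirs(os.path.dirname(filename)) without raising when the
	# directories already exist.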

	@staticmethod
	def temp_name(filename):
		"""Returns a temporary filename for the given filename."""
		if filename == u'-' or (os.path.exists(filename) and not os.path.isfile(filename)):
			return filename
		return filename + u'.part'
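
	# For example, temp_name(u'video.flv') -> u'video.flv.part'; stdout
	# (u'-') and existing non-regular files are returned unchanged.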

	@staticmethod
	def format_bytes(bytes):
		"""Format a byte count as a string with a 1024-based unit suffix."""
		if bytes is None:
			return 'N/A'
		if type(bytes) is str:
			bytes = float(bytes)
		if bytes == 0.0:
			exponent = 0
		else:
			exponent = long(math.log(bytes, 1024.0))
		suffix = 'bkMGTPEZY'[exponent]
		converted = float(bytes) / float(1024**exponent)
		return '%.2f%s' % (converted, suffix)
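
	# e.g. format_bytes(1048576) -> '1.00M', format_bytes(500) -> '500.00b'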

	@staticmethod
	def calc_percent(byte_counter, data_len):
		if data_len is None:
			return '---.-%'
		return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))

	@staticmethod
	def calc_eta(start, now, total, current):
		if total is None:
			return '--:--'
		dif = now - start
		if current == 0 or dif < 0.001: # One millisecond
			return '--:--'
		rate = float(current) / dif
		eta = long((float(total) - float(current)) / rate)
		(eta_mins, eta_secs) = divmod(eta, 60)
		if eta_mins > 99:
			return '--:--'
		return '%02d:%02d' % (eta_mins, eta_secs)

	@staticmethod
	def calc_speed(start, now, bytes):
		dif = now - start
		if bytes == 0 or dif < 0.001: # One millisecond
			return '%10s' % '---b/s'
		return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))

	@staticmethod
	def best_block_size(elapsed_time, bytes):
		new_min = max(bytes / 2.0, 1.0)
		new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
		if elapsed_time < 0.001:
			return long(new_max)
		rate = bytes / elapsed_time
		if rate > new_max:
			return long(new_max)
		if rate < new_min:
			return long(new_min)
		return long(rate)
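
	# The next block is sized to roughly one second of measured throughput
	# (rate is in bytes/second), clamped to at most double or half the
	# previous block and an absolute cap of 4 MB.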

	@staticmethod
	def parse_bytes(bytestr):
		"""Parse a string indicating a byte quantity into a long integer."""
		matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
		if matchobj is None:
			return None
		number = float(matchobj.group(1))
		multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
		return long(round(number * multiplier))
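
	# e.g. parse_bytes('500k') -> 512000L and parse_bytes('2M') -> 2097152L;
	# a missing suffix means plain bytes ('' indexes to exponent 0).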

	def add_info_extractor(self, ie):
		"""Add an InfoExtractor object to the end of the list."""
		self._ies.append(ie)
		ie.set_downloader(self)

	def add_post_processor(self, pp):
		"""Add a PostProcessor object to the end of the chain."""
		self._pps.append(pp)
		pp.set_downloader(self)

	def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
		"""Print message to the screen if not in quiet mode."""
		try:
			if not self.params.get('quiet', False):
				terminator = [u'\n', u''][skip_eol]
				print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
				self._screen_file.flush()
		except (UnicodeEncodeError), err:
			if not ignore_encoding_errors:
				raise

	def to_stderr(self, message):
		"""Print message to stderr."""
		print >>sys.stderr, message.encode(preferredencoding())

	def fixed_template(self):
		"""Checks if the output template is fixed."""
		return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)

	def trouble(self, message=None):
		"""Determine action to take when a download problem appears.

		Depending on whether the downloader has been configured to ignore
		download errors or not, this method may raise an exception or
		not when errors are found, after printing the message.
		"""
		if message is not None:
			self.to_stderr(message)
		if not self.params.get('ignoreerrors', False):
			raise DownloadError(message)
		self._download_retcode = 1

	def slow_down(self, start_time, byte_counter):
		"""Sleep if the download speed is over the rate limit."""
		rate_limit = self.params.get('ratelimit', None)
		if rate_limit is None or byte_counter == 0:
			return
		now = time.time()
		elapsed = now - start_time
		if elapsed <= 0.0:
			return
		speed = float(byte_counter) / elapsed
		if speed > rate_limit:
			time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
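
	# The sleep time is byte_counter/rate_limit - elapsed: the time the
	# transfer should have taken at the rate limit, minus the time it
	# actually took so far.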

	def try_rename(self, old_filename, new_filename):
		"""Try to rename old_filename to new_filename, reporting errors."""
		try:
			if old_filename == new_filename:
				return
			os.rename(old_filename, new_filename)
		except (IOError, OSError), err:
			self.trouble(u'ERROR: unable to rename file')

	def report_destination(self, filename):
		"""Report destination filename."""
		self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)

	def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
		"""Report download progress."""
		if self.params.get('noprogress', False):
			return
		self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
				(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)

	def report_resuming_byte(self, resume_len):
		"""Report attempt to resume at given byte."""
		self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

	def report_retry(self, count, retries):
		"""Report retry in case of HTTP error 5xx."""
		self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

	def report_file_already_downloaded(self, file_name):
		"""Report file has already been fully downloaded."""
		try:
			self.to_screen(u'[download] %s has already been downloaded' % file_name)
		except (UnicodeEncodeError), err:
			self.to_screen(u'[download] The file has already been downloaded')

	def report_unable_to_resume(self):
		"""Report it was impossible to resume download."""
		self.to_screen(u'[download] Unable to resume')

	def report_finish(self):
		"""Report download finished."""
		if self.params.get('noprogress', False):
			self.to_screen(u'[download] Download completed')
		else:
			self.to_screen(u'')

	def increment_downloads(self):
		"""Increment the ordinal that assigns a number to each file."""
		self._num_downloads += 1

	def process_info(self, info_dict):
		"""Process a single dictionary returned by an InfoExtractor."""
		# Do nothing else if in simulate mode
		if self.params.get('simulate', False):
			# Forced printings
			if self.params.get('forcetitle', False):
				print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forceurl', False):
				print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
				print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcedescription', False) and 'description' in info_dict:
				print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')

			return

		try:
			template_dict = dict(info_dict)
			template_dict['epoch'] = unicode(long(time.time()))
			template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
			filename = self.params['outtmpl'] % template_dict
		except (ValueError, KeyError), err:
			self.trouble(u'ERROR: invalid system charset or erroneous output template')
			return
		if self.params.get('nooverwrites', False) and os.path.exists(filename):
			self.to_stderr(u'WARNING: file exists and will be skipped')
			return

		try:
			self.pmkdir(filename)
		except (OSError, IOError), err:
			self.trouble(u'ERROR: unable to create directories: %s' % str(err))
			return

		try:
			success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
		except (OSError, IOError), err:
			raise UnavailableVideoError
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self.trouble(u'ERROR: unable to download video data: %s' % str(err))
			return
		except (ContentTooShortError, ), err:
			self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
			return

		if success:
			try:
				self.post_process(filename, info_dict)
			except (PostProcessingError), err:
				self.trouble(u'ERROR: postprocessing: %s' % str(err))
				return

	def download(self, url_list):
		"""Download a given list of URLs."""
		if len(url_list) > 1 and self.fixed_template():
			raise SameFileError(self.params['outtmpl'])

		for url in url_list:
			suitable_found = False
			for ie in self._ies:
				# Go to next InfoExtractor if not suitable
				if not ie.suitable(url):
					continue

				# Suitable InfoExtractor found
				suitable_found = True

				# Extract information from URL and process it
				ie.extract(url)

				# Suitable InfoExtractor has been found; go to next URL
				break

			if not suitable_found:
				self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)

		return self._download_retcode

	def post_process(self, filename, ie_info):
		"""Run the postprocessing chain on the given file."""
		info = dict(ie_info)
		info['filepath'] = filename
		for pp in self._pps:
			info = pp.run(info)
			if info is None:
				break

	def _download_with_rtmpdump(self, filename, url, player_url):
		self.report_destination(filename)
		tmpfilename = self.temp_name(filename)

		# Check for rtmpdump first
		try:
			subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
		except (OSError, IOError):
			self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
			return False

		# Download using rtmpdump. rtmpdump returns exit code 2 when
		# the connection was interrupted and resuming appears to be
		# possible. This is part of rtmpdump's normal usage, AFAIK.
		basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
		retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
		while retval == 2 or retval == 1:
			prevsize = os.path.getsize(tmpfilename)
			self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
			time.sleep(5.0) # This seems to be needed
			retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
			cursize = os.path.getsize(tmpfilename)
			if prevsize == cursize and retval == 1:
				break
		if retval == 0:
			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
			self.try_rename(tmpfilename, filename)
			return True
		else:
			self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
			return False

	def _do_download(self, filename, url, player_url):
		# Check if the file is already present
		if self.params.get('continuedl', False) and os.path.isfile(filename):
			self.report_file_already_downloaded(filename)
			return True

		# Attempt to download using rtmpdump
		if url.startswith('rtmp'):
			return self._download_with_rtmpdump(filename, url, player_url)

		tmpfilename = self.temp_name(filename)
		stream = None
		open_mode = 'wb'
		basic_request = urllib2.Request(url, None, std_headers)
		request = urllib2.Request(url, None, std_headers)

		# Establish possible resume length
		if os.path.isfile(tmpfilename):
			resume_len = os.path.getsize(tmpfilename)
		else:
			resume_len = 0

		# Request parameters in case of being able to resume
		if self.params.get('continuedl', False) and resume_len != 0:
			self.report_resuming_byte(resume_len)
			request.add_header('Range', 'bytes=%d-' % resume_len)
			open_mode = 'ab'

		count = 0
		retries = self.params.get('retries', 0)
		while count <= retries:
			# Establish connection
			try:
				data = urllib2.urlopen(request)
				break
			except (urllib2.HTTPError, ), err:
				if (err.code < 500 or err.code >= 600) and err.code != 416:
					# Unexpected HTTP error
					raise
				elif err.code == 416:
					# Unable to resume (requested range not satisfiable)
					try:
						# Open the connection again without the range header
						data = urllib2.urlopen(basic_request)
						content_length = data.info()['Content-Length']
					except (urllib2.HTTPError, ), err:
						if err.code < 500 or err.code >= 600:
							raise
					else:
						# Examine the reported length
						if (content_length is not None and
								(resume_len - 100 < long(content_length) < resume_len + 100)):
							# The file had already been fully downloaded.
							# Explanation of the above condition: in issue #175 it was revealed that
							# YouTube sometimes adds or removes a few bytes from the end of the file,
							# changing the file size slightly and causing problems for some users. So
							# I decided to implement a suggested change and consider the file
							# completely downloaded if the file size differs less than 100 bytes from
							# the one in the hard drive.
							self.report_file_already_downloaded(filename)
							self.try_rename(tmpfilename, filename)
							return True
						else:
							# The length does not match; start the download over
							self.report_unable_to_resume()
							open_mode = 'wb'
							break
			# Retry
			count += 1
			if count <= retries:
				self.report_retry(count, retries)

		if count > retries:
			self.trouble(u'ERROR: giving up after %s retries' % retries)
			return False

		data_len = data.info().get('Content-length', None)
		data_len_str = self.format_bytes(data_len)
		byte_counter = 0
		block_size = 1024
		start = time.time()
		while True:
			# Download and write
			before = time.time()
			data_block = data.read(block_size)
			after = time.time()
			data_block_len = len(data_block)
			if data_block_len == 0:
				break
			byte_counter += data_block_len

			# Open the destination file just in time
			if stream is None:
				try:
					(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
					self.report_destination(filename)
				except (OSError, IOError), err:
					self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
					return False
			try:
				stream.write(data_block)
			except (IOError, OSError), err:
				self.trouble(u'\nERROR: unable to write data: %s' % str(err))
				return False
			block_size = self.best_block_size(after - before, data_block_len)

			# Progress message
			percent_str = self.calc_percent(byte_counter, data_len)
			eta_str = self.calc_eta(start, time.time(), data_len, byte_counter)
			speed_str = self.calc_speed(start, time.time(), byte_counter)
			self.report_progress(percent_str, data_len_str, speed_str, eta_str)

			# Apply rate limit
			self.slow_down(start, byte_counter)

		stream.close()
		self.report_finish()
		if data_len is not None and str(byte_counter) != data_len:
			raise ContentTooShortError(byte_counter, long(data_len))
		self.try_rename(tmpfilename, filename)
		return True


class InfoExtractor(object):
	"""Information Extractor class.

	Information extractors are the classes that, given a URL, extract
	information from the video (or videos) the URL refers to. This
	information includes the real video URL, the video title and simplified
	title, author and others. The information is stored in a dictionary
	which is then passed to the FileDownloader. The FileDownloader
	processes this information, possibly downloading the video to the file
	system, among other possible outcomes. The dictionaries must include
	the following fields:

	id:         Video identifier.
	url:        Final video URL.
	uploader:   Nickname of the video uploader.
	title:      Literal title.
	stitle:     Simplified title.
	ext:        Video filename extension.
	format:     Video format.
	player_url: SWF Player URL (may be None).

	The following fields are optional. Their primary purpose is to allow
	youtube-dl to serve as the backend for a video search function, such
	as the one in youtube2mp3. They are only used when their respective
	forced printing functions are called:

	thumbnail:   Full URL to a video thumbnail image.
	description: One-line video description.

	Subclasses of this one should re-define the _real_initialize() and
	_real_extract() methods, as well as the suitable() static method.
	Probably, they should also be instantiated and added to the main
	downloader.
	"""

	_ready = False
	_downloader = None

	def __init__(self, downloader=None):
		"""Constructor. Receives an optional downloader."""
		self._ready = False
		self.set_downloader(downloader)

	@staticmethod
	def suitable(url):
		"""Receives a URL and returns True if suitable for this IE."""
		return False

	def initialize(self):
		"""Initializes an instance (authentication, etc)."""
		if not self._ready:
			self._real_initialize()
			self._ready = True

	def extract(self, url):
		"""Extracts URL information and returns it in a list of dicts."""
		self.initialize()
		return self._real_extract(url)

	def set_downloader(self, downloader):
		"""Sets the downloader for this IE."""
		self._downloader = downloader

	def _real_initialize(self):
		"""Real initialization process. Redefine in subclasses."""
		pass

	def _real_extract(self, url):
		"""Real extraction process. Redefine in subclasses."""
		pass
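
# A skeletal extractor following the contract above (the URL pattern and
# field values are illustrative only, not a real site):
#
#   class ExampleIE(InfoExtractor):
#       @staticmethod
#       def suitable(url):
#           return re.match(r'(?:http://)?(?:www\.)?example\.com/', url) is not None
#
#       def _real_extract(self, url):
#           self._downloader.increment_downloads()
#           self._downloader.process_info({
#               'id': u'0000', 'url': u'http://example.com/video.flv',
#               'uploader': u'NA', 'upload_date': u'NA',
#               'title': u'Example', 'stitle': u'Example',
#               'ext': u'flv', 'format': u'NA', 'player_url': None,
#           })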


class YoutubeIE(InfoExtractor):
	"""Information extractor for youtube.com."""

	_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'
	_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
	_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
	_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
	_NETRC_MACHINE = 'youtube'
	# Listed in order of quality
	_available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
	_video_extensions = {
		'13': '3gp',
		'17': 'mp4',
		'18': 'mp4',
		'22': 'mp4',
		'37': 'mp4',
		'38': 'video', # You actually don't know if this will be MOV, AVI or whatever
		'43': 'webm',
		'45': 'webm',
	}

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeIE._VALID_URL, url) is not None)

	def report_lang(self):
		"""Report attempt to set language."""
		self._downloader.to_screen(u'[youtube] Setting language')

	def report_login(self):
		"""Report attempt to log in."""
		self._downloader.to_screen(u'[youtube] Logging in')

	def report_age_confirmation(self):
		"""Report attempt to confirm age."""
		self._downloader.to_screen(u'[youtube] Confirming age')

	def report_video_webpage_download(self, video_id):
		"""Report attempt to download video webpage."""
		self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)

	def report_video_info_webpage_download(self, video_id):
		"""Report attempt to download video info webpage."""
		self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)

	def report_information_extraction(self, video_id):
		"""Report attempt to extract video information."""
		self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)

	def report_unavailable_format(self, video_id, format):
		"""Report that a requested video format is not available."""
		self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))

	def report_rtmp_download(self):
		"""Indicate the download will use the RTMP protocol."""
		self._downloader.to_screen(u'[youtube] RTMP download detected')

	def _real_initialize(self):
		if self._downloader is None:
			return

		username = None
		password = None
		downloader_params = self._downloader.params

		# Attempt to use provided username and password or .netrc data
		if downloader_params.get('username', None) is not None:
			username = downloader_params['username']
			password = downloader_params['password']
		elif downloader_params.get('usenetrc', False):
			try:
				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
				if info is not None:
					username = info[0]
					password = info[2]
				else:
					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
			except (IOError, netrc.NetrcParseError), err:
				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
				return

		# Set language
		request = urllib2.Request(self._LANG_URL, None, std_headers)
		try:
			self.report_lang()
			urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
			return

		# No authentication to be performed
		if username is None:
			return

		# Log in
		login_form = {
				'current_form': 'loginForm',
				'next': '/',
				'action_login': 'Log In',
				'username': username,
				'password': password,
				}
		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form), std_headers)
		try:
			self.report_login()
			login_results = urllib2.urlopen(request).read()
			if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
				self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
				return
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
			return

		# Confirm age
		age_form = {
				'next_url': '/',
				'action_confirm': 'Confirm',
				}
		request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form), std_headers)
		try:
			self.report_age_confirmation()
			age_results = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
			return

	def _real_extract(self, url):
		# Extract video id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return
		video_id = mobj.group(2)

		# Get video webpage
		self.report_video_webpage_download(video_id)
		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id, None, std_headers)
		try:
			video_webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
			return

		# Attempt to extract SWF player URL
		mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
		if mobj is not None:
			player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
		else:
			player_url = None

		# Get video info
		self.report_video_info_webpage_download(video_id)
		for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
			video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
					% (video_id, el_type))
			request = urllib2.Request(video_info_url, None, std_headers)
			try:
				video_info_webpage = urllib2.urlopen(request).read()
				video_info = parse_qs(video_info_webpage)
				if 'token' in video_info:
					break
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
				return
		if 'token' not in video_info:
			if 'reason' in video_info:
				self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
			else:
				self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
			return

		# Start extracting information
		self.report_information_extraction(video_id)

		# uploader
		if 'author' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = urllib.unquote_plus(video_info['author'][0])

		# title
		if 'title' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = urllib.unquote_plus(video_info['title'][0])
		video_title = video_title.decode('utf-8')
		video_title = sanitize_title(video_title)

		# simplified title
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
		simple_title = simple_title.strip(ur'_')

		# thumbnail image
		if 'thumbnail_url' not in video_info:
			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
			video_thumbnail = ''
		else:	# don't panic if we can't find it
			video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])

		# upload date
		upload_date = u'NA'
		mobj = re.search(r'id="eow-date".*?>(.*?)</span>', video_webpage, re.DOTALL)
		if mobj is not None:
			upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
			format_expressions = ['%d %B %Y', '%B %d %Y']
			for expression in format_expressions:
				try:
					upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
				except:
					pass

		# description
		video_description = 'No description available.'
		if self._downloader.params.get('forcedescription', False):
			mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
			if mobj is not None:
				video_description = mobj.group(1)

		# token
		video_token = urllib.unquote_plus(video_info['token'][0])

		# Decide which formats to download
		requested_format = self._downloader.params.get('format', None)
		get_video_template = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token)

		if 'fmt_url_map' in video_info:
			url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
			format_limit = self._downloader.params.get('format_limit', None)
			if format_limit is not None and format_limit in self._available_formats:
				format_list = self._available_formats[self._available_formats.index(format_limit):]
			else:
				format_list = self._available_formats
			existing_formats = [x for x in format_list if x in url_map]
			if len(existing_formats) == 0:
				self._downloader.trouble(u'ERROR: no known formats available for video')
				return
			if requested_format is None:
				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
			elif requested_format == '-1':
				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
			else:
				video_url_list = [(requested_format, get_video_template % requested_format)] # Specific format

		elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
			self.report_rtmp_download()
			video_url_list = [(None, video_info['conn'][0])]

		else:
			self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
			return

		for format_param, video_real_url in video_url_list:
			# At this point we have a new video
			self._downloader.increment_downloads()

			# Extension
			video_extension = self._video_extensions.get(format_param, 'flv')

			# Find the video URL in fmt_url_map or conn parameters
			try:
				# Process video information
				self._downloader.process_info({
					'id': video_id.decode('utf-8'),
					'url': video_real_url.decode('utf-8'),
					'uploader': video_uploader.decode('utf-8'),
					'upload_date': upload_date,
					'title': video_title,
					'stitle': simple_title,
					'ext': video_extension.decode('utf-8'),
					'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
					'thumbnail': video_thumbnail.decode('utf-8'),
					'description': video_description.decode('utf-8'),
					'player_url': player_url,
				})
			except UnavailableVideoError, err:
				self._downloader.trouble(u'ERROR: unable to download video (format may not be available)')


class MetacafeIE(InfoExtractor):
	"""Information Extractor for metacafe.com."""

	_VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
	_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
	_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(MetacafeIE._VALID_URL, url) is not None)

	def report_disclaimer(self):
		"""Report disclaimer retrieval."""
		self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')

	def report_age_confirmation(self):
		"""Report attempt to confirm age."""
		self._downloader.to_screen(u'[metacafe] Confirming age')

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)

	def _real_initialize(self):
		# Retrieve disclaimer
		request = urllib2.Request(self._DISCLAIMER, None, std_headers)
		try:
			self.report_disclaimer()
			disclaimer = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
			return

		# Confirm age
		disclaimer_form = {
			'filters': '0',
			'submit': "Continue - I'm over 18",
			}
		request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form), std_headers)
		try:
			self.report_age_confirmation()
			disclaimer = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
			return

	def _real_extract(self, url):
		# Extract id and simplified title from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return

		video_id = mobj.group(1)

		# Check if video comes from YouTube
		mobj2 = re.match(r'^yt-(.*)$', video_id)
		if mobj2 is not None:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
			return

		# At this point we have a new video
		self._downloader.increment_downloads()

		simple_title = mobj.group(2).decode('utf-8')

		# Retrieve video webpage to extract further information
		request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
		if mobj is not None:
			mediaURL = urllib.unquote(mobj.group(1))
			video_extension = mediaURL[-3:]

			# Extract gdaKey if available
			mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
			if mobj is None:
				video_url = mediaURL
			else:
				gdaKey = mobj.group(1)
				video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
		else:
			mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: unable to extract media URL')
				return
			vardict = parse_qs(mobj.group(1))
			if 'mediaData' not in vardict:
				self._downloader.trouble(u'ERROR: unable to extract media URL')
				return
			mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
			if mobj is None:
				self._downloader.trouble(u'ERROR: unable to extract media URL')
				return
			mediaURL = mobj.group(1).replace('\\/', '/')
			video_extension = mediaURL[-3:]
			video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))

		mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)

		mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = mobj.group(1)

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url.decode('utf-8'),
				'uploader': video_uploader.decode('utf-8'),
				'upload_date': u'NA',
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')
|
2008-07-24 15:53:24 +02:00
|
|
|
|
2009-02-02 19:59:48 +01:00
|
|
|
|
2010-07-02 01:53:47 +02:00
|
|
|
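# MetacafeIE handles two page formats: older pages embed the media location in a
# '&mediaURL=...' fragment (optionally signed via '&gdaKey=...'), while newer
# pages carry a JSON-ish 'mediaData' entry inside the flashvars value. The lines
# below are hypothetical illustrations of what each regex matches, not captures
# from a real page:
#
#	&mediaURL=http%3A%2F%2Fv.example.com%2Fexample.flv&gdaKey=abc123&
#	name="flashvars" value="mediaData={"mediaURL":"http:\/\/v.example.com\/example.flv","key":"abc123"}"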
class DailymotionIE(InfoExtractor):
	"""Information Extractor for Dailymotion"""

	_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(DailymotionIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# Extract id and simplified title from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		simple_title = mobj.group(2).decode('utf-8')
		video_extension = 'flv'

		# Retrieve video webpage to extract further information
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))

		# if needed add http://www.dailymotion.com/ if relative URL

		video_url = mediaURL

		# '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
		mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)

		mobj = re.search(r'(?im)<div class="dmco_html owner">.*?<a class="name" href="/.+?">(.+?)</a>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = mobj.group(1)

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url.decode('utf-8'),
				'uploader': video_uploader.decode('utf-8'),
				'upload_date': u'NA',
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')

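# The Dailymotion player receives its stream location through an
# addVariable("video", "...") call whose value is URL-encoded, hence the
# urllib.unquote() above. A worked illustration (hypothetical value):
#
#	>>> urllib.unquote('%2Fget%2Fflv%2Fexample.flv%3Fkey%3Dabc')
#	'/get/flv/example.flv?key=abc'
#
# As the in-method comment notes, the decoded value may be a relative path, so
# prepending http://www.dailymotion.com/ remains possible future work.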
class GoogleIE(InfoExtractor):
	"""Information extractor for video.google.com."""

	_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(GoogleIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# Extract id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		video_extension = 'mp4'

		# Retrieve video webpage to extract further information
		request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader, and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r"download_url:'([^']+)'", webpage)
		if mobj is None:
			video_extension = 'flv'
			mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))
		mediaURL = mediaURL.replace('\\x3d', '\x3d')
		mediaURL = mediaURL.replace('\\x26', '\x26')

		video_url = mediaURL

		mobj = re.search(r'<title>(.*)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# Extract video description
		mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video description')
			return
		video_description = mobj.group(1).decode('utf-8')
		if not video_description:
			video_description = 'No description available.'

		# Extract video thumbnail
		if self._downloader.params.get('forcethumbnail', False):
			request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
			try:
				webpage = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
				return
			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
				return
			video_thumbnail = mobj.group(1)
		else:	# we need something to pass to process_info
			video_thumbnail = ''

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url.decode('utf-8'),
				'uploader': u'NA',
				'upload_date': u'NA',
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')

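# In the Flash fallback above, Google Video embeds the stream URL in a script
# using JavaScript hex escapes instead of literal '=' and '&'. A worked
# illustration of the two replace() calls (hypothetical value):
#
#	>>> u = 'http://vp.video.google.com/videodownload?version\\x3d0\\x26docid\\x3d123'
#	>>> u.replace('\\x3d', '\x3d').replace('\\x26', '\x26')
#	'http://vp.video.google.com/videodownload?version=0&docid=123'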
class PhotobucketIE(InfoExtractor):
	"""Information extractor for photobucket.com."""

	_VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(PhotobucketIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# Extract id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		video_extension = 'flv'

		# Retrieve video webpage to extract further information
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader, and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))

		video_url = mediaURL

		mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		video_uploader = mobj.group(2).decode('utf-8')

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url.decode('utf-8'),
				'uploader': video_uploader,
				'upload_date': u'NA',
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')

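# PhotobucketIE._VALID_URL keys on the 'current' query parameter naming an .flv
# file; a hypothetical URL that matches:
#
#	http://s123.photobucket.com/albums/xyz/someuser/?action=view&current=example.flv
#
# group(1) ('example.flv') doubles as the video id.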
class YahooIE(InfoExtractor):
	"""Information extractor for video.yahoo.com."""

	# _VALID_URL matches all Yahoo! Video URLs
	# _VPAGE_URL matches only the extractable '/watch/' URLs
	_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
	_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(YahooIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url, new_video=True):
		# Extract ID from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(2)
		video_extension = 'flv'

		# Rewrite valid but non-extractable URLs as
		# extractable English language /watch/ URLs
		if re.match(self._VPAGE_URL, url) is None:
			request = urllib2.Request(url)
			try:
				webpage = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
				return

			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: Unable to extract id field')
				return
			yahoo_id = mobj.group(1)

			mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: Unable to extract vid field')
				return
			yahoo_vid = mobj.group(1)

			url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
			return self._real_extract(url, new_video=False)

		# Retrieve video webpage to extract further information
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract uploader and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = mobj.group(1).decode('utf-8')
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# The people/profile alternation is non-capturing so that group(1)
		# is the uploader's display name
		mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(?:people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video uploader')
			return
		video_uploader = mobj.group(1).decode('utf-8')

		# Extract video thumbnail
		mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
			return
		video_thumbnail = mobj.group(1).decode('utf-8')

		# Extract video description
		mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video description')
			return
		video_description = mobj.group(1).decode('utf-8')
		if not video_description: video_description = 'No description available.'

		# Extract video height and width
		mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video height')
			return
		yv_video_height = mobj.group(1)

		mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video width')
			return
		yv_video_width = mobj.group(1)

		# Retrieve video playlist to extract media URL
		# I'm not completely sure what all these options are, but we
		# seem to need most of them, otherwise the server sends a 401.
		yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
		yv_bitrate = '700' # according to Wikipedia this is hard-coded
		request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
				'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
				'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract media URL from playlist XML
		mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Unable to extract media URL')
			return
		video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
		video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url,
				'uploader': video_uploader,
				'upload_date': u'NA',
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'thumbnail': video_thumbnail,
				'description': video_description,
				'player_url': None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')

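# The getPlaylistFOP request above is a single GET with every parameter inlined
# in the query string; with hypothetical values video_id='12345678', height
# '360' and width '640' it would read:
#
#	http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=12345678&tech=flash&mode=playlist&lg=R0xx6idZnW2zlrKP8xxAIR&bitrate=700&vidH=360&vidW=640&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797
#
# The response is a small playlist XML whose <STREAM> element supplies the APP
# (server) and FULLPATH (resource) halves of the final media URL.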
class GenericIE(InfoExtractor):
	"""Generic last-resort information extractor."""

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return True

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
		self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# At this point we have a new video
		self._downloader.increment_downloads()

		video_id = url.split('/')[-1]
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return
		except ValueError, err:
			# since this is the last-resort InfoExtractor, if
			# this error is thrown, it'll be thrown here
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		self.report_extraction(video_id)
		# Start with something easy: JW Player in SWFObject
		mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
		if mobj is None:
			# Broaden the search a little bit
			mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# It's possible that one of the regexes
		# matched, but returned an empty group:
		if mobj.group(1) is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		video_url = urllib.unquote(mobj.group(1))
		video_id = os.path.basename(video_url)

		# here's a fun little line of code for you:
		video_extension = os.path.splitext(video_id)[1][1:]
		video_id = os.path.splitext(video_id)[0]

		# it's tempting to parse this further, but you would
		# have to take into account all the variations like
		#   Video Title - Site Name
		#   Site Name | Video Title
		#   Video Title - Tagline | Site Name
		# and so on and so forth; it's just not practical
		mobj = re.search(r'<title>(.*)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# video uploader is domain name
		mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader (domain name)')
			return
		video_uploader = mobj.group(1).decode('utf-8')

		try:
			# Process video information
			self._downloader.process_info({
				'id': video_id.decode('utf-8'),
				'url': video_url.decode('utf-8'),
				'uploader': video_uploader,
				'upload_date': u'NA',
				'title': video_title,
				'stitle': simple_title,
				'ext': video_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'ERROR: unable to download video')

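# A hypothetical inline-player snippet that the "easy" JW Player heuristic
# above would match, for illustration only:
#
#	flashvars: 'autostart=false&file=http://example.com/media/clip.flv'
#
# group(1) is 'http://example.com/media/clip.flv'; its basename 'clip' becomes
# the video id and 'flv' the extension.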
class YoutubeSearchIE(InfoExtractor):
	"""Information Extractor for YouTube search queries."""
	_VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
	_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
	_youtube_ie = None
	_max_youtube_results = 1000

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return

		# Split only on the first colon so the query itself may contain ':'
		prefix, query = query.split(':', 1)
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_youtube_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_youtube_results:
					self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
					n = self._max_youtube_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""

		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url, None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
				return

			pagenum = pagenum + 1

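# Worked examples of the query-prefix parsing used by the search extractors:
#
#	'ytsearch:cute cats'    -> prefix ''    -> 1 result
#	'ytsearch15:cute cats'  -> prefix '15'  -> 15 results
#	'ytsearchall:cute cats' -> prefix 'all' -> up to _max_youtube_results (1000)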
class GoogleSearchIE(InfoExtractor):
	"""Information Extractor for Google Video search queries."""
	_VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
	_VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
	_MORE_PAGES_INDICATOR = r'<span>Next</span>'
	_google_ie = None
	_max_google_results = 1000

	def __init__(self, google_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._google_ie = google_ie

	@staticmethod
	def suitable(url):
		return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._google_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return

		# Split only on the first colon so the query itself may contain ':'
		prefix, query = query.split(':', 1)
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_google_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_google_results:
					self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
					n = self._max_google_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""

		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url, None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = mobj.group(1)
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
				return

			pagenum = pagenum + 1

class YahooSearchIE(InfoExtractor):
	"""Information Extractor for Yahoo! Video search queries."""
	_VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
	_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
	_MORE_PAGES_INDICATOR = r'\s*Next'
	_yahoo_ie = None
	_max_yahoo_results = 1000

	def __init__(self, yahoo_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._yahoo_ie = yahoo_ie

	@staticmethod
	def suitable(url):
		return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._yahoo_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return

		# Split only on the first colon so the query itself may contain ':'
		prefix, query = query.split(':', 1)
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_yahoo_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_yahoo_results:
					self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
					n = self._max_yahoo_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""

		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url, None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = mobj.group(1)
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
				return

			pagenum = pagenum + 1

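# All three search extractors above share one pagination scheme: fetch result
# pages until either n unique ids have been collected or the page no longer
# contains the "Next" link (_MORE_PAGES_INDICATOR), then hand every id to the
# site-specific extractor. The already_seen set guards against the same id
# appearing more than once, e.g. when a result is repeated across pages.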
class YoutubePlaylistIE(InfoExtractor):
	"""Information Extractor for YouTube playlists."""

	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists)\?.*?p=|user/.*?/user/)([^&]+).*'
	_TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)

	def report_download_page(self, playlist_id, pagenum):
		"""Report attempt to download playlist page with given number."""
		self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, url):
		# Extract playlist id
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
			return

		# Download playlist pages
		playlist_id = mobj.group(1)
		video_ids = []
		pagenum = 1

		while True:
			self.report_download_page(playlist_id, pagenum)
			request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum), None, std_headers)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			ids_in_page = []
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				if mobj.group(1) not in ids_in_page:
					ids_in_page.append(mobj.group(1))
			video_ids.extend(ids_in_page)

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				break
			pagenum = pagenum + 1

		playliststart = self._downloader.params.get('playliststart', 1) - 1
		playlistend = self._downloader.params.get('playlistend', -1)
		# A playlistend of -1 means "until the end"; slicing with -1 directly
		# would silently drop the final video
		if playlistend == -1:
			video_ids = video_ids[playliststart:]
		else:
			video_ids = video_ids[playliststart:playlistend]

		for id in video_ids:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
		return

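# Worked example of the --playlist-start/--playlist-end slicing above: with
# playliststart=1 (i.e. --playlist-start 2) and playlistend=5, a 10-item
# playlist yields items 2 through 5; with the default playlistend of -1 the
# slice runs to the end of the list.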
class YoutubeUserIE(InfoExtractor):
	"""Information Extractor for YouTube users."""

	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/user/(.*)'
	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
	_VIDEO_INDICATOR = r'http://gdata.youtube.com/feeds/api/videos/(.*)' # XXX Fix this.
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeUserIE._VALID_URL, url) is not None)

	def report_download_page(self, username):
		"""Report attempt to download user page."""
		self._downloader.to_screen(u'[youtube] user %s: Downloading page ' % (username))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, url):
		# Extract username
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
			return

		# Download user page
		username = mobj.group(1)
		video_ids = []
		pagenum = 1

		self.report_download_page(username)
		request = urllib2.Request(self._TEMPLATE_URL % (username), None, std_headers)
		try:
			page = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
			return

		# Extract video identifiers
		ids_in_page = []

		for mobj in re.finditer(self._VIDEO_INDICATOR, page):
			if mobj.group(1) not in ids_in_page:
				ids_in_page.append(mobj.group(1))
		video_ids.extend(ids_in_page)

		playliststart = self._downloader.params.get('playliststart', 1) - 1
		playlistend = self._downloader.params.get('playlistend', -1)
		# Same convention as YoutubePlaylistIE: -1 means "until the end"
		if playlistend == -1:
			video_ids = video_ids[playliststart:]
		else:
			video_ids = video_ids[playliststart:playlistend]

		for id in video_ids:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
		return

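# A note on the pattern below: '(?#locale)' is an inline regular-expression
# comment (it matches nothing), so '(?:../(?#locale))?' merely allows an
# optional two-character locale path segment. Hypothetical URLs that match:
#
#	http://depositfiles.com/files/abc123
#	http://depositfiles.com/de/files/abc123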
class DepositFilesIE(InfoExtractor):
	"""Information extractor for depositfiles.com"""

	_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles.com/(?:../(?#locale))?files/(.+)'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(DepositFilesIE._VALID_URL, url) is not None)

	def report_download_webpage(self, file_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)

	def report_extraction(self, file_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# At this point we have a new file
		self._downloader.increment_downloads()

		file_id = url.split('/')[-1]
		# Rebuild url in english locale
		url = 'http://depositfiles.com/en/files/' + file_id

		# Retrieve file webpage with 'Free download' button pressed
		free_download_indication = { 'gateway_result' : '1' }
		request = urllib2.Request(url, urllib.urlencode(free_download_indication), std_headers)
		try:
			self.report_download_webpage(file_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
			return

		# Search for the real file URL
		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
		if (mobj is None) or (mobj.group(1) is None):
			# Try to figure out reason of the error.
			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
			if (mobj is not None) and (mobj.group(1) is not None):
				restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
				self._downloader.trouble(u'ERROR: %s' % restriction_message)
			else:
				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
			return

		file_url = mobj.group(1)
		file_extension = os.path.splitext(file_url)[1][1:]

		# Search for file title
		mobj = re.search(r'<b title="(.*?)">', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		file_title = mobj.group(1).decode('utf-8')

		try:
			# Process file information
			self._downloader.process_info({
				'id': file_id.decode('utf-8'),
				'url': file_url.decode('utf-8'),
				'uploader': u'NA',
				'upload_date': u'NA',
				'title': file_title,
				'stitle': file_title,
				'ext': file_extension.decode('utf-8'),
				'format': u'NA',
				'player_url': None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'ERROR: unable to download file')

class PostProcessor(object):
	"""Post Processor class.

	PostProcessor objects can be added to downloaders with their
	add_post_processor() method. When the downloader has finished a
	successful download, it will take its internal chain of PostProcessors
	and start calling the run() method on each one of them, first with
	an initial argument and then with the returned value of the previous
	PostProcessor.

	The chain will be stopped if one of them ever returns None or the end
	of the chain is reached.

	PostProcessor objects follow a "mutual registration" process similar
	to InfoExtractor objects.
	"""

	_downloader = None

	def __init__(self, downloader=None):
		self._downloader = downloader

	def set_downloader(self, downloader):
		"""Sets the downloader for this PP."""
		self._downloader = downloader

	def run(self, information):
		"""Run the PostProcessor.

		The "information" argument is a dictionary like the ones
		composed by InfoExtractors. The only difference is that this
		one has an extra field called "filepath" that points to the
		downloaded file.

		When this method returns None, the postprocessing chain is
		stopped. However, this method may return an information
		dictionary that will be passed to the next postprocessing
		object in the chain. It can be the one it received after
		changing some fields.

		In addition, this method may raise a PostProcessingError
		exception that will be taken into account by the downloader
		it was called from.
		"""
		return information # by default, do nothing

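# A minimal illustrative PostProcessor subclass (hypothetical, not used
# anywhere in this program): it renames the downloaded file to lowercase and
# passes the updated information dictionary down the chain.
#
#	class LowercasePP(PostProcessor):
#		def run(self, information):
#			new_path = os.path.join(
#				os.path.dirname(information['filepath']),
#				os.path.basename(information['filepath']).lower())
#			os.rename(information['filepath'], new_path)
#			information['filepath'] = new_path
#			return information	# returning None stops the chain here
#
# It would be attached with fd.add_post_processor(LowercasePP()) after the
# FileDownloader 'fd' has been created.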
### MAIN PROGRAM ###
if __name__ == '__main__':
	try:
		# Modules needed only when running the main program
		import getpass
		import optparse

		# Function to update the program file with the latest version from github.com
		def update_self(downloader, filename):
			# Note: downloader only used for options
			if not os.access(filename, os.W_OK):
				sys.exit('ERROR: no write permissions on %s' % filename)

			downloader.to_screen('Updating to latest stable version...')
			latest_url = 'http://github.com/rg3/youtube-dl/raw/master/LATEST_VERSION'
			latest_version = urllib.urlopen(latest_url).read().strip()
			prog_url = 'http://github.com/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
			newcontent = urllib.urlopen(prog_url).read()
			stream = open(filename, 'w')
			stream.write(newcontent)
			stream.close()
			downloader.to_screen('Updated to version %s' % latest_version)

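		# Self-updating thus boils down to two plain HTTP fetches: LATEST_VERSION
		# names the newest release tag, and the script published under that tag
		# overwrites this very file in place. From the command line it is simply:
		#
		#	youtube-dl -U
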
# Parse command line
|
2008-07-22 10:56:54 +02:00
|
|
|
parser = optparse.OptionParser(
|
2009-04-28 07:30:20 +02:00
|
|
|
usage='Usage: %prog [options] url...',
|
2010-11-19 19:41:09 +01:00
|
|
|
version='2010.11.19',
|
2009-04-28 07:30:20 +02:00
|
|
|
conflict_handler='resolve',
|
|
|
|
)
|
|
|
|
|
2008-07-22 10:56:54 +02:00
|
|
|
parser.add_option('-h', '--help',
|
|
|
|
action='help', help='print this help text and exit')
|
|
|
|
parser.add_option('-v', '--version',
|
|
|
|
action='version', help='print program version and exit')
|
2009-11-19 20:19:47 +01:00
|
|
|
parser.add_option('-U', '--update',
|
|
|
|
action='store_true', dest='update_self', help='update this program to latest stable version')
|
2009-04-28 07:30:20 +02:00
|
|
|
parser.add_option('-i', '--ignore-errors',
|
|
|
|
action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
|
|
|
|
parser.add_option('-r', '--rate-limit',
|
2010-06-29 11:53:13 +02:00
|
|
|
dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
|
2010-05-30 18:34:56 +02:00
|
|
|
parser.add_option('-R', '--retries',
|
2010-06-29 11:53:13 +02:00
|
|
|
dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
|
2010-08-04 18:52:00 +02:00
|
|
|
parser.add_option('--playlist-start',
|
|
|
|
dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
|
2010-11-04 23:19:09 +01:00
|
|
|
parser.add_option('--playlist-end',
|
|
|
|
dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
|
2009-04-28 07:30:20 +02:00
|
|
|
|
|
|
|
authentication = optparse.OptionGroup(parser, 'Authentication Options')
|
|
|
|
authentication.add_option('-u', '--username',
|
2010-06-29 11:53:13 +02:00
|
|
|
dest='username', metavar='USERNAME', help='account username')
|
2009-04-28 07:30:20 +02:00
|
|
|
authentication.add_option('-p', '--password',
|
2010-06-29 11:53:13 +02:00
|
|
|
dest='password', metavar='PASSWORD', help='account password')
|
2009-04-28 07:30:20 +02:00
|
|
|
authentication.add_option('-n', '--netrc',
|
2008-07-22 10:56:54 +02:00
|
|
|
action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
|
2009-04-28 07:30:20 +02:00
|
|
|
parser.add_option_group(authentication)
|
|
|
|
|
|
|
|
video_format = optparse.OptionGroup(parser, 'Video Format Options')
|
|
|
|
video_format.add_option('-f', '--format',
|
2010-06-29 11:53:13 +02:00
|
|
|
action='store', dest='format', metavar='FORMAT', help='video format code')
|
2009-04-28 07:30:20 +02:00
|
|
|
video_format.add_option('-m', '--mobile-version',
|
2009-05-13 22:08:34 +02:00
|
|
|
action='store_const', dest='format', help='alias for -f 17', const='17')
|
2010-03-19 18:15:43 +01:00
|
|
|
video_format.add_option('--all-formats',
|
|
|
|
action='store_const', dest='format', help='download all available video formats', const='-1')
|
2010-07-13 18:20:02 +02:00
|
|
|
video_format.add_option('--max-quality',
|
2010-07-22 20:28:20 +02:00
|
|
|
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
|
2010-08-01 10:40:37 +02:00
|
|
|
video_format.add_option('-b', '--best-quality',
|
|
|
|
action='store_true', dest='bestquality', help='download the best video quality (DEPRECATED)')
|
2009-04-28 07:30:20 +02:00
|
|
|
parser.add_option_group(video_format)
|
|
|
|
|
|
|
|
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
|
|
|
|
verbosity.add_option('-q', '--quiet',
|
|
|
|
action='store_true', dest='quiet', help='activates quiet mode', default=False)
|
|
|
|
verbosity.add_option('-s', '--simulate',
|
|
|
|
action='store_true', dest='simulate', help='do not download video', default=False)
|
|
|
|
verbosity.add_option('-g', '--get-url',
|
|
|
|
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
|
|
|
|
verbosity.add_option('-e', '--get-title',
|
|
|
|
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
|
2010-04-04 17:57:59 +02:00
|
|
|
verbosity.add_option('--get-thumbnail',
|
|
|
|
action='store_true', dest='getthumbnail', help='simulate, quiet but print thumbnail URL', default=False)
|
|
|
|
verbosity.add_option('--get-description',
|
|
|
|
action='store_true', dest='getdescription', help='simulate, quiet but print video description', default=False)
|
2010-03-07 11:24:22 +01:00
|
|
|
verbosity.add_option('--no-progress',
|
|
|
|
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
|
2009-04-28 07:30:20 +02:00
|
|
|
parser.add_option_group(verbosity)
|
|
|
|
|
|
|
|
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
|
2009-04-29 19:32:40 +02:00
|
|
|
filesystem.add_option('-t', '--title',
|
|
|
|
action='store_true', dest='usetitle', help='use title in file name', default=False)
|
|
|
|
filesystem.add_option('-l', '--literal',
|
|
|
|
action='store_true', dest='useliteral', help='use literal title in file name', default=False)
|
2010-11-06 21:34:22 +01:00
|
|
|
filesystem.add_option('-A', '--auto-number',
|
2010-11-06 22:13:59 +01:00
|
|
|
action='store_true', dest='autonumber', help='number downloaded files starting from 00000', default=False)
|
2009-04-28 07:30:20 +02:00
|
|
|
filesystem.add_option('-o', '--output',
|
2010-06-29 11:53:13 +02:00
|
|
|
dest='outtmpl', metavar='TEMPLATE', help='output filename template')
|
2009-04-28 07:30:20 +02:00
|
|
|
filesystem.add_option('-a', '--batch-file',
|
2010-06-29 11:53:13 +02:00
|
|
|
dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
|
2009-04-28 07:30:20 +02:00
|
|
|
filesystem.add_option('-w', '--no-overwrites',
|
2009-02-04 21:38:31 +01:00
|
|
|
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
|
2009-05-26 23:06:21 +02:00
|
|
|
filesystem.add_option('-c', '--continue',
|
|
|
|
action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
|
		filesystem.add_option('--cookies',
				dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
		parser.add_option_group(filesystem)

		(opts, args) = parser.parse_args()

		# Open appropriate CookieJar
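		# Note: cookielib.MozillaCookieJar reads and writes the plain-text
		# Netscape cookies.txt format; the jar is only loaded when the
		# file already exists and is readable, so a missing file simply
		# means starting with an empty jar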
		if opts.cookiefile is None:
			jar = cookielib.CookieJar()
		else:
			try:
				jar = cookielib.MozillaCookieJar(opts.cookiefile)
				if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
					jar.load()
			except (IOError, OSError), err:
				sys.exit(u'ERROR: unable to open cookie file')

		# General configuration
		cookie_processor = urllib2.HTTPCookieProcessor(jar)
		# Build a single opener with both handlers installed; calling
		# install_opener() twice in a row would leave only the second
		# opener active
		urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor))
		socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

		# Batch file verification
		batchurls = []
		if opts.batchfile is not None:
			try:
				if opts.batchfile == '-':
					batchfd = sys.stdin
				else:
					batchfd = open(opts.batchfile, 'r')
				batchurls = batchfd.readlines()
				batchurls = [x.strip() for x in batchurls]
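				# Skip blank lines and lines starting with '#', ';' or '/',
				# which are treated as comments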
				batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
			except IOError:
				sys.exit(u'ERROR: batch file could not be read')
		all_urls = batchurls + args

		# Conflicting, missing and erroneous options
		if opts.bestquality:
			print >>sys.stderr, u'\nWARNING: -b/--best-quality IS DEPRECATED AS IT IS THE DEFAULT BEHAVIOR NOW\n'
		if opts.usenetrc and (opts.username is not None or opts.password is not None):
			parser.error(u'using .netrc conflicts with giving username/password')
		if opts.password is not None and opts.username is None:
			parser.error(u'account username missing')
		if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
			parser.error(u'using output template conflicts with using title, literal title or auto number')
		if opts.usetitle and opts.useliteral:
			parser.error(u'using title conflicts with using literal title')
		if opts.username is not None and opts.password is None:
			opts.password = getpass.getpass(u'Type account password and press return:')
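		# parse_bytes accepts an optional unit suffix, so rate limits
		# such as '50k' or '4.2m' should be accepted here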
		if opts.ratelimit is not None:
			numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
			if numeric_limit is None:
				parser.error(u'invalid rate limit specified')
			opts.ratelimit = numeric_limit
		if opts.retries is not None:
			try:
				opts.retries = long(opts.retries)
			except (TypeError, ValueError), err:
				parser.error(u'invalid retry count specified')
		try:
			opts.playliststart = long(opts.playliststart)
			if opts.playliststart <= 0:
				raise ValueError
		except (TypeError, ValueError), err:
			parser.error(u'invalid playlist start number specified')
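		# A playlist end of -1 (the default) means "up to the last
		# entry"; any other value must be positive and must not precede
		# the playlist start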
		try:
			opts.playlistend = long(opts.playlistend)
			if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
				raise ValueError
		except (TypeError, ValueError), err:
			parser.error(u'invalid playlist end number specified')

		# Information extractors
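		# The playlist, user and search extractors take another extractor
		# as a constructor argument: they only expand a listing or query
		# into individual video URLs and delegate each of those to the
		# wrapped extractor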
		youtube_ie = YoutubeIE()
		metacafe_ie = MetacafeIE(youtube_ie)
		dailymotion_ie = DailymotionIE()
		youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
		youtube_user_ie = YoutubeUserIE(youtube_ie)
		youtube_search_ie = YoutubeSearchIE(youtube_ie)
		google_ie = GoogleIE()
		google_search_ie = GoogleSearchIE(google_ie)
		photobucket_ie = PhotobucketIE()
		yahoo_ie = YahooIE()
		yahoo_search_ie = YahooSearchIE(yahoo_ie)
		deposit_files_ie = DepositFilesIE()
		generic_ie = GenericIE()

		# File downloader
		fd = FileDownloader({
			'usenetrc': opts.usenetrc,
			'username': opts.username,
			'password': opts.password,
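			# Each --get-* option implies both quiet and simulate: the
			# requested field is printed to stdout and nothing is
			# downloaded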
			'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription),
			'forceurl': opts.geturl,
			'forcetitle': opts.gettitle,
			'forcethumbnail': opts.getthumbnail,
			'forcedescription': opts.getdescription,
			'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription),
			'format': opts.format,
			'format_limit': opts.format_limit,
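			# The output template is picked with a short-circuiting
			# and/or chain: the first truthy branch wins, so an explicit
			# -o template always takes precedence and the more specific
			# option combinations come before the generic fallbacks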
			'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
				or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
				or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
				or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
				or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
				or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
				or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
				or u'%(id)s.%(ext)s'),
			'ignoreerrors': opts.ignoreerrors,
			'ratelimit': opts.ratelimit,
			'nooverwrites': opts.nooverwrites,
			'retries': opts.retries,
			'continuedl': opts.continue_dl,
			'noprogress': opts.noprogress,
			'playliststart': opts.playliststart,
			'playlistend': opts.playlistend,
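			# With '-o -' the video data itself goes to stdout, so log
			# messages are routed to stderr instead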
			'logtostderr': opts.outtmpl == '-',
			})
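
		# Extractors are tried in registration order: the downloader asks
		# each one via suitable() whether it recognizes a URL, and the
		# first match handles it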
		fd.add_info_extractor(youtube_search_ie)
		fd.add_info_extractor(youtube_pl_ie)
		fd.add_info_extractor(youtube_user_ie)
		fd.add_info_extractor(metacafe_ie)
		fd.add_info_extractor(dailymotion_ie)
		fd.add_info_extractor(youtube_ie)
		fd.add_info_extractor(google_ie)
		fd.add_info_extractor(google_search_ie)
		fd.add_info_extractor(photobucket_ie)
		fd.add_info_extractor(yahoo_ie)
		fd.add_info_extractor(yahoo_search_ie)
		fd.add_info_extractor(deposit_files_ie)

		# This must come last since it's the fallback if none of the
		# others work
		fd.add_info_extractor(generic_ie)

		# Update this program to the latest version if requested
		if opts.update_self:
			update_self(fd, sys.argv[0])

		# Maybe do nothing
		if len(all_urls) < 1:
			if not opts.update_self:
				parser.error(u'you must provide at least one URL')
			else:
				sys.exit()

		retcode = fd.download(all_urls)

		# Dump cookie jar if requested
		if opts.cookiefile is not None:
			try:
				jar.save()
			except (IOError, OSError), err:
				sys.exit(u'ERROR: unable to save cookie jar')

		sys.exit(retcode)

	except DownloadError:
		sys.exit(1)
	except SameFileError:
		sys.exit(u'ERROR: fixed output name but more than one file to download')
	except KeyboardInterrupt:
		sys.exit(u'\nERROR: Interrupted by user')