diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index fcc94db2c..0b6293897 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -49,36 +49,6 @@
-class GoogleSearchIE(SearchInfoExtractor):
-    """Information Extractor for Google Video search queries."""
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
-    _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
-    _SEARCH_KEY = 'gvsearch'
-
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-
-        res = {
-            '_type': 'playlist',
-            'id': query,
-            'entries': []
-        }
-
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
-
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
-                    '_type': 'url',
-                    'url': mobj.group(1)
-                }
-                res['entries'].append(e)
-
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
-                return res
 
 class YahooSearchIE(SearchInfoExtractor):
     """Information Extractor for Yahoo! Video search queries."""
diff --git a/youtube_dl/extractor/google.py b/youtube_dl/extractor/google.py
new file mode 100644
index 000000000..21c240e51
--- /dev/null
+++ b/youtube_dl/extractor/google.py
@@ -0,0 +1,39 @@
+import itertools
+import re
+
+from .common import SearchInfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+)
+
+
+class GoogleSearchIE(SearchInfoExtractor):
+    """Information Extractor for Google Video search queries."""
+    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    _MAX_RESULTS = 1000
+    IE_NAME = u'video.google:search'
+    _SEARCH_KEY = 'gvsearch'
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+
+        res = {
+            '_type': 'playlist',
+            'id': query,
+            'entries': []
+        }
+
+        for pagenum in itertools.count(1):
+            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
+            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
+                                             note='Downloading result page ' + str(pagenum))
+
+            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
+                e = {
+                    '_type': 'url',
+                    'url': mobj.group(1)
+                }
+                res['entries'].append(e)
+
+            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+                return res
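
The relocated extractor keeps its two screen-scraping assumptions unchanged: result links are pulled out of `<h3 class="r">` headings, and paging stops once the "next" button (`id="pnnext" class="pn"`) is no longer present. A minimal sketch of both rules against an invented sample page (not part of the patch; `sample_page` and its URLs are made up for illustration):

```python
import re

# Not part of the patch: each organic hit on a Google Video search result page
# is assumed to be an <h3 class="r"> heading wrapping the target link, and the
# presence of the "next" button (id="pnnext" class="pn") signals that another
# result page exists. sample_page is an invented, simplified stand-in.
sample_page = (
    '<h3 class="r"><a href="http://example.com/video-1">First hit</a></h3>'
    '<h3 class="r"><a href="http://example.com/video-2">Second hit</a></h3>'
    '<a id="pnnext" class="pn" href="/search?start=10">Next</a>'
)

hits = re.findall(r'<h3 class="r"><a href="([^"]+)"', sample_page)
has_more = re.search(r'id="pnnext" class="pn"', sample_page) is not None

print(hits)      # ['http://example.com/video-1', 'http://example.com/video-2']
print(has_more)  # True -> the paging loop would fetch another result page
```

As with the other `SearchInfoExtractor` subclasses, `_SEARCH_KEY = 'gvsearch'` means a query such as `gvsearch10:<query>` should end up calling `_get_n_results(query, 10)`, so the behaviour of the `gvsearch` prefix is unchanged by the move.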