
[vodlocker] PEP8, generalization, and simplification (#3223)

Philipp Hagemeister 2014-07-11 10:57:08 +02:00
parent c09cbf0ed9
commit 4094b6e36d
2 changed files with 18 additions and 12 deletions

youtube_dl/extractor/common.py

@@ -1,11 +1,12 @@
 import base64
 import hashlib
 import json
+import netrc
 import os
 import re
 import socket
 import sys
-import netrc
+import time
 import xml.etree.ElementTree
 
 from ..utils import (
@@ -575,6 +576,13 @@ def _proto_relative_url(self, url, scheme=None):
         else:
             return url
 
+    def _sleep(self, timeout, video_id, msg_template=None):
+        if msg_template is None:
+            msg_template = u'%(video_id)s: Waiting for %(timeout)s seconds'
+        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
+        self.to_screen(msg)
+        time.sleep(timeout)
+
 
 class SearchInfoExtractor(InfoExtractor):
     """
@@ -618,4 +626,3 @@ def _get_n_results(self, query, n):
     @property
     def SEARCH_KEY(self):
         return self._SEARCH_KEY
-
youtube_dl/extractor/vodlocker.py

@@ -28,9 +28,6 @@ class VodlockerIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-
-        url = 'http://vodlocker.com/%s' % video_id
-
         webpage = self._download_webpage(url, video_id)
 
         fields = dict(re.findall(r'''(?x)<input\s+
@@ -41,21 +38,23 @@ def _real_extract(self, url):
             ''', webpage))
 
         if fields['op'] == 'download1':
-            time.sleep(3) #they do detect when requests happen too fast!
+            self._sleep(3, video_id)  # they do detect when requests happen too fast!
             post = compat_urllib_parse.urlencode(fields)
             req = compat_urllib_request.Request(url, post)
             req.add_header('Content-type', 'application/x-www-form-urlencoded')
-            webpage = self._download_webpage(req, video_id, 'Downloading video page')
+            webpage = self._download_webpage(
+                req, video_id, 'Downloading video page')
 
-        title = self._search_regex(r'id="file_title".*?>\s*(.*?)\s*<span', webpage, 'title')
-        thumbnail = self._search_regex(r'image:\s*"(http[^\"]+)",', webpage, 'thumbnail')
-        url = self._search_regex(r'file:\s*"(http[^\"]+)",', webpage, 'file url')
+        title = self._search_regex(
+            r'id="file_title".*?>\s*(.*?)\s*<span', webpage, 'title')
+        thumbnail = self._search_regex(
+            r'image:\s*"(http[^\"]+)",', webpage, 'thumbnail')
+        url = self._search_regex(
+            r'file:\s*"(http[^\"]+)",', webpage, 'file url')
 
         formats = [{
             'format_id': 'sd',
             'url': url,
-            'ext': determine_ext(url),
-            'quality': 1,
         }]
 
         return {
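To show how the generalized helper is meant to be reused beyond vodlocker, here is a minimal sketch of a hypothetical extractor following the same interstitial-form pattern: wait briefly, re-POST the hidden fields, then scrape the real page. The ExampleHosterIE name, URL pattern, and regexes are illustrative only and not part of this commit; the compat_* imports come from ..utils, which is where those helpers lived in the 2014-era layout.

import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urllib_request,
)


class ExampleHosterIE(InfoExtractor):
    # Hypothetical extractor, not part of this commit; it only illustrates
    # the "wait, then re-POST the hidden form fields" pattern used above.
    _VALID_URL = r'https?://(?:www\.)?examplehoster\.test/(?P<id>\w+)'

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        # Collect the hidden <input> fields of the interstitial form.
        fields = dict(re.findall(
            r'<input[^>]+name="([^"]+)"[^>]+value="([^"]*)"', webpage))

        if fields.get('op') == 'download1':
            # Same anti-flood wait as above, now reported via the shared helper.
            self._sleep(3, video_id)
            post = compat_urllib_parse.urlencode(fields)
            req = compat_urllib_request.Request(url, post)
            req.add_header('Content-type', 'application/x-www-form-urlencoded')
            webpage = self._download_webpage(
                req, video_id, 'Downloading video page')

        video_url = self._search_regex(
            r'file:\s*"(http[^"]+)"', webpage, 'video URL')
        title = self._search_regex(
            r'id="file_title"[^>]*>\s*(.*?)\s*<', webpage, 'title')

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
        }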