From 2514d2635e0c8ff0fb72d00a093a849001df2bdd Mon Sep 17 00:00:00 2001
From: Jouke Waleson
Date: Sun, 23 Nov 2014 21:23:05 +0100
Subject: [PATCH] PEP8: E225,E227

---
 .../transition_helper_exe/youtube-dl.py | 2 +-
 youtube_dl/aes.py | 32 +++++++++----------
 youtube_dl/downloader/f4m.py | 4 +--
 youtube_dl/downloader/rtmp.py | 10 +++---
 youtube_dl/extractor/cinemassacre.py | 2 +-
 youtube_dl/extractor/crunchyroll.py | 8 ++---
 youtube_dl/extractor/dotsub.py | 2 +-
 youtube_dl/extractor/generic.py | 2 +-
 youtube_dl/extractor/iprima.py | 2 +-
 youtube_dl/extractor/lifenews.py | 2 +-
 youtube_dl/extractor/smotri.py | 2 +-
 youtube_dl/extractor/sohu.py | 2 +-
 youtube_dl/extractor/swrmediathek.py | 2 +-
 youtube_dl/extractor/theplatform.py | 2 +-
 youtube_dl/extractor/tudou.py | 2 +-
 youtube_dl/extractor/xtube.py | 2 +-
 youtube_dl/extractor/yahoo.py | 2 +-
 youtube_dl/postprocessor/ffmpeg.py | 4 +--
 youtube_dl/update.py | 2 +-
 19 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/devscripts/transition_helper_exe/youtube-dl.py b/devscripts/transition_helper_exe/youtube-dl.py
index d75316cfa..ab59d9376 100644
--- a/devscripts/transition_helper_exe/youtube-dl.py
+++ b/devscripts/transition_helper_exe/youtube-dl.py
@@ -32,7 +32,7 @@ def b(x):
     signature = signature[2:]
     if not b('\x00') in signature:
         return False
-    signature = signature[signature.index(b('\x00')) +1:]
+    signature = signature[signature.index(b('\x00')) + 1:]
     if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
         return False
     signature = signature[19:]
diff --git a/youtube_dl/aes.py b/youtube_dl/aes.py
index 2db8ab3a1..662d49011 100644
--- a/youtube_dl/aes.py
+++ b/youtube_dl/aes.py
@@ -24,8 +24,8 @@ def aes_ctr_decrypt(data, key, counter):
     decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES]
-        block += [0] *(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
@@ -49,8 +49,8 @@ def aes_cbc_decrypt(data, key, iv):
     decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES]
-        block += [0] *(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
@@ -76,20 +76,20 @@ def key_expansion(data):
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
 
     return data
@@ -106,12 +106,12 @@ def aes_encrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds +1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
 
@@ -127,7 +127,7 @@ def aes_decrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
@@ -155,14 +155,14 @@ def aes_decrypt_text(data, password, key_size_bytes):
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
 
-    key = password[:key_size_bytes] + [0] *(key_size_bytes - len(password))
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
 
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
 
     class Counter:
-        __value = nonce + [0] *(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
 
         def next_value(self):
             temp = self.__value
@@ -270,7 +270,7 @@ def key_schedule_core(data, rcon_iteration):
 
 
 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]
 
 
 def rijndael_mul(a, b):
@@ -293,7 +293,7 @@ def mix_column(data, matrix):
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i *4: (i +1) *4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
@@ -320,7 +320,7 @@ def shift_rows_inv(data):
 
 def inc(data):
     data = data[:]  # copy
-    for i in range(len(data) -1, -1, -1):
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py
index ed8df16d2..c752e8e24 100644
--- a/youtube_dl/downloader/f4m.py
+++ b/youtube_dl/downloader/f4m.py
@@ -55,7 +55,7 @@ def read_box_info(self):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size -header_end)
+        return real_size, box_type, self.read(real_size - header_end)
 
     def read_asrt(self):
         # version
@@ -180,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags +1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
 
diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index 642f13b9a..58ae2005c 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -46,13 +46,13 @@ def run_rtmpdump(args):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1)) *1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100 -resume_percent, percent -resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len -resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -72,7 +72,7 @@ def run_rtmpdump(args):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1)) *1024)
+                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -88,7 +88,7 @@ def run_rtmpdump(args):
                         if not cursor_in_new_line:
                             self.to_screen('')
                             cursor_in_new_line = True
-                        self.to_screen('[rtmpdump] ' +line)
+                        self.to_screen('[rtmpdump] ' + line)
             proc.wait()
             if not cursor_in_new_line:
                 self.to_screen('')
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
index bdc84f1f5..b7fa73c3b 100644
--- a/youtube_dl/extractor/cinemassacre.py
+++ b/youtube_dl/extractor/cinemassacre.py
@@ -77,7 +77,7 @@ def _real_extract(self, url):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/') +1]
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index ab03c8602..c3c4d114a 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -226,10 +226,10 @@ def _real_extract(self, url):
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt +'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' +stream_quality +'&media%5Fid=' +stream_id +'&video%5Fformat=' +stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
             streamdata = self._download_xml(
@@ -248,8 +248,8 @@ def _real_extract(self, url):
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' +sub_id,\
-                                              video_id, note='Downloading subtitles for ' +sub_name)
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
+                                              video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
diff --git a/youtube_dl/extractor/dotsub.py b/youtube_dl/extractor/dotsub.py
index b30d70e7b..638bb33cd 100644
--- a/youtube_dl/extractor/dotsub.py
+++ b/youtube_dl/extractor/dotsub.py
@@ -27,7 +27,7 @@ def _real_extract(self, url):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated'] /1000)  # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in miliseconds
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index a40ff6b64..109dd20db 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -748,7 +748,7 @@ def _playlist_from_matches(matches, getter, ie=None):
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-' +mobj.group(1), 'BlipTV')
+            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')
diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py
index 90b4c966d..4247d6391 100644
--- a/youtube_dl/extractor/iprima.py
+++ b/youtube_dl/extractor/iprima.py
@@ -54,7 +54,7 @@ def _real_extract(self, url):
 
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random() *1073741824), floor(random() *1073741824))
+            (floor(random() * 1073741824), floor(random() * 1073741824))
         )
 
         req = compat_urllib_request.Request(player_url)
diff --git a/youtube_dl/extractor/lifenews.py b/youtube_dl/extractor/lifenews.py
index e7ee3bba8..1dfe7f77f 100644
--- a/youtube_dl/extractor/lifenews.py
+++ b/youtube_dl/extractor/lifenews.py
@@ -71,4 +71,4 @@ def make_entry(video_id, media, video_number=None):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number +1) for video_number, media in enumerate(videos)]
+            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py
index 9d2f8d40b..b6a71305f 100644
--- a/youtube_dl/extractor/smotri.py
+++ b/youtube_dl/extractor/smotri.py
@@ -184,7 +184,7 @@ def _real_extract(self, url):
 
         view_count = self._html_search_regex(
             'Общее количество просмотров.*?(\\d+)',
-            webpage, 'view count', fatal=False, flags=re.MULTILINE|re.DOTALL)
+            webpage, 'view count', fatal=False, flags=re.MULTILINE | re.DOTALL)
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index 875d09faa..0348e7460 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -69,7 +69,7 @@ def _fetch_data(vid_id, mytv=False):
                         (allot, prot, clipsURL[i], su[i]))
             part_str = self._download_webpage(
                 part_url, video_id,
-                note=u'Downloading part %d of %d' % (i +1, part_count))
+                note=u'Downloading part %d of %d' % (i + 1, part_count))
 
             part_info = part_str.split('|')
             video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
diff --git a/youtube_dl/extractor/swrmediathek.py b/youtube_dl/extractor/swrmediathek.py
index 4132b6428..58073eefe 100644
--- a/youtube_dl/extractor/swrmediathek.py
+++ b/youtube_dl/extractor/swrmediathek.py
@@ -80,7 +80,7 @@ def _real_extract(self, url):
 
             if media_type == 'Video':
                 fmt.update({
-                    'format_note': ['144p', '288p', '544p', '720p'][quality -1],
+                    'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
                     'vcodec': codec,
                 })
             elif media_type == 'Audio':
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index 40b22677b..522a095a2 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -118,5 +118,5 @@ def _real_extract(self, url):
             'formats': formats,
             'description': info['description'],
             'thumbnail': info['defaultThumbnailUrl'],
-            'duration': info['duration'] //1000,
+            'duration': info['duration'] // 1000,
         }
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index 3007b136f..b6e4a432b 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -37,7 +37,7 @@ class TudouIE(InfoExtractor):
     }]
 
     def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id=" +str(id)
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")
diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py
index 26a51340f..38448e7c0 100644
--- a/youtube_dl/extractor/xtube.py
+++ b/youtube_dl/extractor/xtube.py
@@ -97,7 +97,7 @@ def _real_extract(self, url):
             url, username, note='Retrieving profile page')
 
         video_count = int(self._search_regex(
-            r'%s\'s Videos \(([0-9]+)\)' %username, profile_page,
+            r'%s\'s Videos \(([0-9]+)\)' % username, profile_page,
             'video count'))
 
         PAGE_SIZE = 25
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index 514c16127..39caf60f2 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -229,7 +229,7 @@ def _get_n_results(self, query, n):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page ' +str(pagenum +1))
+                note='Downloading results page ' + str(pagenum + 1))
             m = info['m']
             results = info['results']
 
diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index d9f1cceb9..002a35c67 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -246,7 +246,7 @@ def run(self, information):
         if information['ext'] == self._preferedformat:
             self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
             return True, information
-        self._downloader.to_screen(u'[' +'ffmpeg' +'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
+        self._downloader.to_screen(u'[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
         self.run_ffmpeg(path, outpath, [])
         information['filepath'] = outpath
         information['format'] = self._preferedformat
@@ -466,7 +466,7 @@ def run(self, information):
 
         opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
         for (i, lang) in enumerate(sub_langs):
-            opts.extend(['-map', '%d:0' % (i +1), '-c:s:%d' % i, 'mov_text'])
+            opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
             lang_code = self._conver_lang_code(lang)
             if lang_code is not None:
                 opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
diff --git a/youtube_dl/update.py b/youtube_dl/update.py
index 23457e07a..448ad229c 100644
--- a/youtube_dl/update.py
+++ b/youtube_dl/update.py
@@ -41,7 +41,7 @@ def b(x):
     signature = signature[2:]
     if not b('\x00') in signature:
         return False
-    signature = signature[signature.index(b('\x00')) +1:]
+    signature = signature[signature.index(b('\x00')) + 1:]
     if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
         return False
     signature = signature[19:]
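
The two codes in the subject line are pycodestyle checks: E225 flags missing whitespace around an operator (including one-sided spacing such as "i +1", which this patch fixes throughout), and E227 flags missing whitespace around a bitwise or shift operator such as | or ^. A minimal sketch of both checks, using hypothetical names rather than code taken from the diff above:

    import re

    index = 3
    offset = index +1                   # E225: whitespace on only one side of '+'
    offset = index + 1                  # fixed, as in this patch

    flags = re.MULTILINE|re.DOTALL      # E227: no whitespace around the bitwise '|'
    flags = re.MULTILINE | re.DOTALL    # fixed

Assuming the pep8/pycodestyle or flake8 checker is installed, a run along the lines of "flake8 --select=E225,E227 youtube_dl/" should report nothing for these files once the patch is applied.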