Compare commits

174 Commits

2014.11.14...2014.11.26
| Author | SHA1 | Date |
|---|---|---|
|  | 72476fcc42 |  |
|  | 29e950f7c8 |  |
|  | 7c8ea53b96 |  |
|  | dcddc10a50 |  |
|  | a1008af412 |  |
|  | 61c0663c1e |  |
|  | 81a7a521c5 |  |
|  | e293711802 |  |
|  | ceb3367320 |  |
|  | a03aaaed2e |  |
|  | e075a44afb |  |
|  | 8865bdeb37 |  |
|  | 3aa578cad2 |  |
|  | d3b5101a91 |  |
|  | 5c32110114 |  |
|  | 24144e3b8d |  |
|  | b3034f9df7 |  |
|  | 4c6d2ff8dc |  |
|  | faf3494894 |  |
|  | 535a66ef66 |  |
|  | 5c40bba82f |  |
|  | 855dc479c2 |  |
|  | 0792d5634e |  |
|  | e91cdcae1a |  |
|  | 27e1400f55 |  |
|  | e0938e7731 |  |
|  | b72823a0a4 |  |
|  | 673cf0e773 |  |
|  | f8aace93cd |  |
|  | 80310134e0 |  |
|  | 4d2d638df4 |  |
|  | 0e44f90e18 |  |
|  | 15938ab67a |  |
|  | ab4ee31eb1 |  |
|  | b061ea6e9f |  |
|  | 4aae94f9d0 |  |
|  | acda92f6bc |  |
|  | ddfd0f2727 |  |
|  | d0720e7118 |  |
|  | 4e262a8838 |  |
|  | b9ed3af343 |  |
|  | 63c9b2c1d9 |  |
|  | 65f3a228b1 |  |
|  | 3004ae2c3a |  |
|  | d9836a5917 |  |
|  | be64b5b098 |  |
|  | c3e74731c2 |  |
|  | c920d7f00d |  |
|  | 0bbf12239c |  |
|  | 70d68eb46f |  |
|  | c553fe5d29 |  |
|  | f0c3d729d7 |  |
|  | 1cdedfee10 |  |
|  | 93129d9442 |  |
|  | e8c8653e9d |  |
|  | fab89c67c5 |  |
|  | 3d960a22fa |  |
|  | 51bbb084d3 |  |
|  | 2c25a2bd29 |  |
|  | 355682be01 |  |
|  | 00e9d396ab |  |
|  | 14d4e90eb1 |  |
|  | b74e86f48a |  |
|  | 3d36cea4ac |  |
|  | 380b822003 |  |
|  | b66e699877 |  |
|  | 27f8b0994e |  |
|  | e311b6389a |  |
|  | fab6d4c048 |  |
|  | 4ffc31033e |  |
|  | c1777d5cb3 |  |
|  | 9e1a5b8455 |  |
|  | 784b6d3a9b |  |
|  | c66bdc4869 |  |
|  | 2514d2635e |  |
|  | 8bcc875676 |  |
|  | 5f6a1245ff |  |
|  | f3a3407226 |  |
|  | 598c218f7b |  |
|  | 4698b14b76 |  |
|  | 835a22ef3f |  |
|  | 7d4111ed14 |  |
|  | d37cab2a9d |  |
|  | d16abf434a |  |
|  | a8363f3ab7 |  |
|  | 010cd3a3ee |  |
|  | b9042def9d |  |
|  | aa79ac0c82 |  |
|  | 88125905cf |  |
|  | dd60be2bf9 |  |
|  | 119b3caa46 |  |
|  | 49f0da7ae1 |  |
|  | 2cead7e7bc |  |
|  | 9262867e86 |  |
|  | b9272e8f8f |  |
|  | 021a0db8f7 |  |
|  | e1e8b6897b |  |
|  | 53d1cd1f77 |  |
|  | cad985ab4d |  |
|  | c52331f30c |  |
|  | 42e1ff8665 |  |
|  | 2c64b8ba63 |  |
|  | 42e12102a9 |  |
|  | 6127693ed9 |  |
|  | 71069d2157 |  |
|  | f3391db889 |  |
|  | 9b32eca3ce |  |
|  | ec06f0f610 |  |
|  | e6c9c8f6ee |  |
|  | 85b9275517 |  |
|  | dfd5313afd |  |
|  | be53e2a737 |  |
|  | a1c68b9ef2 |  |
|  | 4d46c1c68c |  |
|  | d6f714f321 |  |
|  | 8569f3d629 |  |
|  | fed5d03260 |  |
|  | 6adeffa7c6 |  |
|  | b244b5c3f9 |  |
|  | f42c190769 |  |
|  | c9bf41145f |  |
|  | 5239075bb6 |  |
|  | 84437adfa3 |  |
|  | 732ea2f09b |  |
|  | aff2f4f4f5 |  |
|  | 3b9f631c41 |  |
|  | 3ba098a6a5 |  |
|  | 1394646a0a |  |
|  | 61ee5aeb73 |  |
|  | 07e378fa18 |  |
|  | e07e931375 |  |
|  | 480b7c32a9 |  |
|  | f56875f271 |  |
|  | 92120217eb |  |
|  | 37eddd3143 |  |
|  | 02a12f9fe6 |  |
|  | 6fcd6e0e21 |  |
|  | 0857baade3 |  |
|  | 469d4c8968 |  |
|  | 23ad44b57b |  |
|  | f48d3e9bbc |  |
|  | fbf94a7815 |  |
|  | 1921b24551 |  |
|  | 28e614de5c |  |
|  | cd9ad1d7e8 |  |
|  | 162f54eca6 |  |
|  | 33a266f4ba |  |
|  | 6b592d93a2 |  |
|  | 4686ae4b64 |  |
|  | 8d05f2c16a |  |
|  | a4bb83956c |  |
|  | eb5376044c |  |
|  | 3cbcff8a2d |  |
|  | e983cf5277 |  |
|  | 0ab1ca5501 |  |
|  | 4baafa229d |  |
|  | 7f3e33a147 |  |
|  | b7558d9881 |  |
|  | a0f59cdcb4 |  |
|  | a4bc433619 |  |
|  | b6b70730bf |  |
|  | 6a68bb574a |  |
|  | 0cf166ad4f |  |
|  | 2707b50ffe |  |
|  | 939fe70de0 |  |
|  | 89c15fe0b3 |  |
|  | ec5f601670 |  |
|  | 8caa0c9779 |  |
|  | e2548b5b25 |  |
|  | bbefcf04bf |  |
|  | c7b0add86f |  |
|  | a0155d93d9 |  |
|  | 00d9ef0b70 |  |
|  | 0cc8888038 |  |

**AUTHORS** (6 changes)

```diff
@@ -82,3 +82,9 @@ Xavier Beynon
 Gabriel Schubiner
 xantares
 Jan Matějka
+Mauroy Sébastien
+William Sewell
+Dao Hoang Son
+Oskar Jauch
+Matthew Rayfield
+t0mm0
```
**README.md**

````diff
@@ -30,7 +30,7 @@ Alternatively, refer to the developer instructions below for how to check out an
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
-2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
+2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
 your Unix box, on Windows or on Mac OS X. It is released to the public domain,
 which means you can modify it, redistribute it or use it however you like.
@@ -93,7 +93,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      COUNT views
     --max-views COUNT                Do not download any videos with more than
                                      COUNT views
-    --no-playlist                    download only the currently playing video
+    --no-playlist                    If the URL refers to a video and a
+                                     playlist, download only the video.
     --age-limit YEARS                download only videos suitable for the given
                                      age
     --download-archive FILE          Download only videos not listed in the
@@ -492,14 +493,15 @@ If you want to add support for a new site, you can follow this quick list (assum
 
         def _real_extract(self, url):
             video_id = self._match_id(url)
+            webpage = self._download_webpage(url, video_id)
 
             # TODO more code goes here, for example ...
-            webpage = self._download_webpage(url, video_id)
             title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
 
             return {
                 'id': video_id,
                 'title': title,
+                'description': self._og_search_description(webpage),
                 # TODO more properties (see youtube_dl/extractor/common.py)
             }
     ```
````
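For context, the hunk above edits the README's extractor example; assembled into a full module as the surrounding quick list describes, the final version of that example reads roughly as follows. This is a sketch: the class name and URL pattern are the README's own placeholders, while `InfoExtractor`, `_match_id`, `_download_webpage`, `_html_search_regex`, and `_og_search_description` are the real helpers from `youtube_dl/extractor/common.py`.

```python
# Sketch of the README example as a complete extractor module; per the
# README's quick list it would live in youtube_dl/extractor/yourextractor.py.
# "YourExtractorIE" and the URL are placeholders, not a real site.
from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')

        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            # TODO more properties (see youtube_dl/extractor/common.py)
        }
```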
**devscripts/bash-completion.py**

```diff
@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
+
 import os
 from os.path import dirname as dirn
 import sys
@@ -9,16 +11,17 @@ import youtube_dl
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
 
+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
```
**devscripts/buildserver.py**

```diff
@@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code):
 
 def win_service_main(service_name, real_main, argc, argv_raw):
     try:
-        #args = [argv_raw[i].value for i in range(argc)]
+        # args = [argv_raw[i].value for i in range(argc)]
         stop_event = threading.Event()
         handler = HandlerEx(functools.partial(stop_event, win_service_handler))
         h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
@@ -233,6 +233,7 @@ def rmtree(path):
 
 #==============================================================================
 
+
 class BuildError(Exception):
     def __init__(self, output, code=500):
         self.output = output
@@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 
 
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.
 
     def do_GET(self):
         path = urlparse.urlparse(self.path)
```
**devscripts/check-porn.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
```
**devscripts/fish-completion.py**

```diff
@@ -23,13 +23,13 @@ EXTRA_ARGS = {
     'batch-file': ['--require-parameter'],
 }
 
 
 def build_completion(opt_parser):
     commands = []
 
     for group in opt_parser.option_groups:
         for option in group.option_list:
             long_option = option.get_opt_string().strip('-')
             help_msg = shell_quote([option.help])
             complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
             if option._short_opts:
                 complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
```
**devscripts/gh-pages/add-version.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import json
 import sys
```
**devscripts/gh-pages/generate-download.py**

```diff
@@ -1,8 +1,7 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
+
 import hashlib
-import shutil
-import subprocess
-import tempfile
 import urllib.request
 import json
 
```
**devscripts/gh-pages/sign-versions.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals, with_statement
 
 import rsa
 import json
@@ -11,22 +12,23 @@ except NameError:
 
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
-	del versions_info['signature']
+    del versions_info['signature']
 
 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
-	try:
-		line = input()
-	except EOFError:
-		break
-	if line == '':
-		break
-	privkey += line.encode('ascii') + b'\n'
+    try:
+        line = input()
+    except EOFError:
+        break
+    if line == '':
+        break
+    privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)
 
 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
 print('signature: ' + signature)
 
 versions_info['signature'] = signature
-json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
+with open('update/versions.json', 'w') as versionsf:
+    json.dump(versions_info, versionsf, indent=4, sort_keys=True)
```
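The signing script above embeds a hex-encoded SHA-256 RSA signature into `update/versions.json`; for context, the matching verification step with the same `rsa` package might look like the sketch below. The public-key values are hypothetical placeholders, not the project's real key.

```python
# Sketch: verify the 'signature' field written by sign-versions.py.
# The modulus/exponent below are made-up placeholders.
import json
from binascii import unhexlify

import rsa

pubkey = rsa.PublicKey(0x12345, 65537)  # placeholder key, not the real one

with open('update/versions.json') as f:
    versions_info = json.load(f)

# The signature covers the JSON payload without the signature field itself,
# serialized with sort_keys=True exactly as in sign-versions.py.
signature = unhexlify(versions_info.pop('signature'))
payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')

# rsa.verify raises rsa.VerificationError if the signature does not match
rsa.verify(payload, signature, pubkey)
```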
**devscripts/gh-pages/update-copyright.py**

```diff
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
 # coding: utf-8
 
-from __future__ import with_statement
+from __future__ import with_statement, unicode_literals
 
 import datetime
 import glob
-import io # For Python 2 compatibilty
+import io  # For Python 2 compatibilty
 import os
 import re
 
@@ -13,7 +13,7 @@ year = str(datetime.datetime.now().year)
 for fn in glob.glob('*.html*'):
     with io.open(fn, encoding='utf-8') as f:
         content = f.read()
-    newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
+    newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
     if content != newc:
         tmpFn = fn + '.part'
         with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
```
**devscripts/gh-pages/update-feed.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import datetime
 import io
@@ -73,4 +74,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-
```
**devscripts/gh-pages/update-sites.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python3
+from __future__ import unicode_literals
 
 import sys
 import os
@@ -9,6 +10,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 
 import youtube_dl
 
+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -21,7 +23,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
 
```
**devscripts/make_readme.py**

```diff
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import io
 import sys
 import re
```
**devscripts/prepare_manpage.py**

```diff
@@ -1,3 +1,4 @@
+from __future__ import unicode_literals
 
 import io
 import os.path
```
**devscripts/transition_helper.py** (deleted)

```diff
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
-
-try:
-	raw_input()
-except NameError: # Python 3
-	input()
-
-filename = sys.argv[0]
-
-API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
-BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-try:
-    urlh = compat_urllib_request.urlopen(BIN_URL)
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-try:
-    with open(filename, 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
```
**devscripts/transition_helper_exe/setup.py** (deleted)

```diff
@@ -1,12 +0,0 @@
-from distutils.core import setup
-import py2exe
-
-py2exe_options = {
-    "bundle_files": 1,
-    "compressed": 1,
-    "optimize": 2,
-    "dist_dir": '.',
-    "dll_excludes": ['w9xpopen.exe']
-}
-
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
```
**devscripts/transition_helper_exe/youtube-dl.py** (deleted)

```diff
@@ -1,102 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-import urllib2
-import json, hashlib
-
-def rsa_verify(message, signature, key):
-    from struct import pack
-    from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
-    block_size = 0
-    n = key[0]
-    while n:
-        block_size += 1
-        n >>= 8
-    signature = pow(int(signature, 16), key[1], key[0])
-    raw_bytes = []
-    while signature:
-        raw_bytes.insert(0, pack("B", signature & 0xFF))
-        signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
-    signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
-    signature = signature[19:]
-    if signature != sha256(message).digest(): return False
-    return True
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
-
-raw_input()
-
-filename = sys.argv[0]
-
-UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
-VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
-JSON_URL = UPDATE_URL + 'versions.json'
-UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-exe = os.path.abspath(filename)
-directory = os.path.dirname(exe)
-if not os.access(directory, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % directory)
-
-try:
-    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
-    versions_info = json.loads(versions_info)
-except:
-    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
-if not 'signature' in versions_info:
-    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
-signature = versions_info['signature']
-del versions_info['signature']
-if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
-    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
-
-version = versions_info['versions'][versions_info['latest']]
-
-try:
-    urlh = urllib2.urlopen(version['exe'][0])
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-newcontent_hash = hashlib.sha256(newcontent).hexdigest()
-if newcontent_hash != version['exe'][1]:
-    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
-
-try:
-    with open(exe + '.new', 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit(u'ERROR: unable to write the new version')
-
-try:
-    bat = os.path.join(directory, 'youtube-dl-updater.bat')
-    b = open(bat, 'w')
-    b.write("""
-echo Updating youtube-dl...
-ping 127.0.0.1 -n 5 -w 1000 > NUL
-move /Y "%s.new" "%s"
-del "%s"
-    \n""" %(exe, exe, bat))
-    b.close()
-
-    os.startfile(bat)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
```
**devscripts/zsh-completion.py**

```diff
@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
+
 import os
 from os.path import dirname as dirn
 import sys
```

**setup.py** (5 changes)

```diff
@@ -4,7 +4,6 @@
 from __future__ import print_function
 
 import os.path
-import pkg_resources
 import warnings
 import sys
 
@@ -103,7 +102,9 @@ setup(
         "Programming Language :: Python :: 2.6",
         "Programming Language :: Python :: 2.7",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.3"
+        "Programming Language :: Python :: 3.2",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
     ],
 
     **params
```
**test/helper.py**

```diff
@@ -59,7 +59,7 @@ class FakeYDL(YoutubeDL):
         params = get_params(override=override)
         super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []
-        
+
     def to_screen(self, s, skip_eol=None):
         print(s)
 
@@ -72,8 +72,10 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning
+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
 
@@ -114,14 +116,14 @@ def expect_info_dict(self, expected_dict, got_dict):
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+                            'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
-                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                             'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
     # Check for the presence of mandatory fields
     if got_dict.get('_type') != 'playlist':
@@ -133,8 +135,8 @@ def expect_info_dict(self, expected_dict, got_dict):
 
     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
-        for key, value in got_dict.items()
-        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+                          for key, value in got_dict.items()
+                          if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
         def _repr(v):
```
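`FakeYDL.expect_warning` above silences one expected warning by rebinding `report_warning` on a single instance with `types.MethodType`; the same instance-level monkey-patch in isolation looks like the sketch below (the `Logger` class and messages are made up for illustration).

```python
# Sketch: instance-level monkey-patching with types.MethodType, mirroring
# FakeYDL.expect_warning in test/helper.py. Logger is a made-up class.
import re
import types


class Logger(object):
    def report_warning(self, message):
        print('WARNING: ' + message)


log = Logger()
old_report_warning = log.report_warning  # keep the original bound method


def report_warning(self, message):
    if re.match(r'expected', message):
        return  # swallow warnings matching the regex
    old_report_warning(message)

# Bind the new function as a method of this one instance only;
# other Logger instances keep the original behavior.
log.report_warning = types.MethodType(report_warning, log)

log.report_warning('expected: will be silenced')
log.report_warning('unexpected: still printed')
```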
**test/swftests/ConstArrayAccess.as** (new file, 18 lines)

```diff
@@ -0,0 +1,18 @@
+// input: []
+// output: 4
+
+package {
+public class ConstArrayAccess {
+	private static const x:int = 2;
+	private static const ar:Array = ["42", "3411"];
+
+    public static function main():int{
+        var c:ConstArrayAccess = new ConstArrayAccess();
+        return c.f();
+    }
+
+    public function f(): int {
+    	return ar[1].length;
+    }
+}
+}
```

**test/swftests/ConstantInt.as** (new file, 12 lines)

```diff
@@ -0,0 +1,12 @@
+// input: []
+// output: 2
+
+package {
+public class ConstantInt {
+	private static const x:int = 2;
+
+    public static function main():int{
+        return x;
+    }
+}
+}
```

**test/swftests/DictCall.as** (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+// input: [{"x": 1, "y": 2}]
+// output: 3
+
+package {
+public class DictCall {
+    public static function main(d:Object):int{
+        return d.x + d.y;
+    }
+}
+}
```

**test/swftests/EqualsOperator.as** (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+// input: []
+// output: false
+
+package {
+public class EqualsOperator {
+    public static function main():Boolean{
+        return 1 == 2;
+    }
+}
+}
```

**test/swftests/MemberAssignment.as** (new file, 22 lines)

```diff
@@ -0,0 +1,22 @@
+// input: [1]
+// output: 2
+
+package {
+public class MemberAssignment {
+    public var v:int;
+
+    public function g():int {
+        return this.v;
+    }
+
+    public function f(a:int):int{
+        this.v = a;
+        return this.v + this.g();
+    }
+
+    public static function main(a:int): int {
+        var v:MemberAssignment = new MemberAssignment();
+        return v.f(a);
+    }
+}
+}
```

**test/swftests/NeOperator.as** (new file, 24 lines)

```diff
@@ -0,0 +1,24 @@
+// input: []
+// output: 123
+
+package {
+public class NeOperator {
+    public static function main(): int {
+        var res:int = 0;
+        if (1 != 2) {
+            res += 3;
+        } else {
+            res += 4;
+        }
+        if (2 != 2) {
+            res += 10;
+        } else {
+            res += 20;
+        }
+        if (9 == 9) {
+            res += 100;
+        }
+        return res;
+    }
+}
+}
```

**test/swftests/PrivateVoidCall.as** (new file, 22 lines)

```diff
@@ -0,0 +1,22 @@
+// input: []
+// output: 9
+
+package {
+public class PrivateVoidCall {
+    public static function main():int{
+        var f:OtherClass = new OtherClass();
+        f.func();
+        return 9;
+    }
+}
+}
+
+class OtherClass {
+    private function pf():void {
+        ;
+    }
+
+    public function func():void {
+        this.pf();
+    }
+}
```

**test/swftests/StringBasics.as** (new file, 11 lines)

```diff
@@ -0,0 +1,11 @@
+// input: []
+// output: 3
+
+package {
+public class StringBasics {
+    public static function main():int{
+        var s:String = "abc";
+        return s.length;
+    }
+}
+}
```

**test/swftests/StringCharCodeAt.as** (new file, 11 lines)

```diff
@@ -0,0 +1,11 @@
+// input: []
+// output: 9897
+
+package {
+public class StringCharCodeAt {
+    public static function main():int{
+        var s:String = "abc";
+        return s.charCodeAt(1) * 100 + s.charCodeAt();
+    }
+}
+}
```

**test/swftests/StringConversion.as** (new file, 11 lines)

```diff
@@ -0,0 +1,11 @@
+// input: []
+// output: 2
+
+package {
+public class StringConversion {
+    public static function main():int{
+        var s:String = String(99);
+        return s.length;
+    }
+}
+}
```
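Each of the new `.as` fixtures above encodes its test contract in the leading `// input:` / `// output:` comments: the input is a JSON array of arguments for `main()`, the output the JSON-encoded expected result. The repository's own runner for these files is `test/test_swfinterp.py`; the parsing sketch below is a simplified assumption of how such a harness reads the contract, not that file's actual code.

```python
# Sketch: extract the JSON-encoded input arguments and expected output
# from a test/swftests/*.as fixture header. Simplified for illustration.
import json
import re


def read_contract(path):
    with open(path) as f:
        source = f.read()
    input_match = re.search(r'^// input: (.*)$', source, re.MULTILINE)
    output_match = re.search(r'^// output: (.*)$', source, re.MULTILINE)
    args = json.loads(input_match.group(1))        # e.g. [] or [{"x": 1, "y": 2}]
    expected = json.loads(output_match.group(1))   # e.g. 3, 9897, or false
    return args, expected


# Example against one of the fixtures added above:
args, expected = read_contract('test/swftests/StringBasics.as')
assert args == [] and expected == 3
```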
**test/test_YoutubeDL.py**

```diff
@@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
```
**test/test_age_restriction.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -19,7 +20,7 @@ def _download_restricted(url, filename, age):
         'age_limit': age,
         'skip_download': True,
         'writeinfojson': True,
-        "outtmpl": "%(id)s.%(ext)s",
+        'outtmpl': '%(id)s.%(ext)s',
     }
     ydl = YoutubeDL(params)
     ydl.add_default_info_extractors()
```
**test/test_all_urls.py**

```diff
@@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
```
**test/test_compat.py**

```diff
@@ -26,11 +26,13 @@ class TestCompat(unittest.TestCase):
         self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
 
     def test_compat_expanduser(self):
+        old_home = os.environ.get('HOME')
         test_str = 'C:\Documents and Settings\тест\Application Data'
         os.environ['HOME'] = (
             test_str if sys.version_info >= (3, 0)
             else test_str.encode(get_filesystem_encoding()))
         self.assertEqual(compat_expanduser('~'), test_str)
+        os.environ['HOME'] = old_home
 
     def test_all_present(self):
         import youtube_dl.compat
```
**test/test_download.py**

```diff
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -38,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)
 
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -59,10 +65,13 @@ defs = gettestcases()
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
+
     def setUp(self):
         self.defs = defs
 
-### Dynamically generate tests
+
+# Dynamically generate tests
+
 
 def generator(test_case):
 
     def test_template(self):
@@ -88,7 +97,7 @@ def generator(test_case):
             return
         for other_ie in other_ies:
             if not other_ie.working():
-                print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
+                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                 return
 
         params = get_params(test_case.get('params', {}))
@@ -99,6 +108,7 @@ def generator(test_case):
         ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
@@ -109,6 +119,7 @@ def generator(test_case):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
         res_dict = None
+
         def try_rm_tcs_files(tcs=None):
             if tcs is None:
                 tcs = test_cases
@@ -132,7 +143,7 @@ def generator(test_case):
                         raise
 
                     if try_num == RETRIES:
-                        report_warning(u'Failed due to network errors, skipping...')
+                        report_warning('Failed due to network errors, skipping...')
                         return
 
                     print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
@@ -204,15 +215,15 @@ def generator(test_case):
 
     return test_template
 
-### And add them to TestDownload
+
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
    i = 1
     while hasattr(TestDownload, tname):
-        tname = 'test_'  + str(test_case['name']) + '_' + str(i)
+        tname = 'test_%s_%d' % (test_case['name'], i)
         i += 1
-    test_method.__name__ = tname
+    test_method.__name__ = str(tname)
     setattr(TestDownload, test_method.__name__, test_method)
     del test_method
```
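The `generator`/`setattr` loop at the end of test_download.py is the standard trick for stamping one unittest method per data case onto a class at import time; a minimal standalone sketch of the same pattern follows (the class and cases here are illustrative, not from the repository).

```python
# Sketch: dynamically attach one test method per data case, mirroring the
# generator/setattr loop in test_download.py. The cases below are made up.
import unittest


class TestArithmetic(unittest.TestCase):
    maxDiff = None


def generator(case):
    # The closure captures one case, so each generated method tests one case.
    def test_template(self):
        self.assertEqual(case['a'] + case['b'], case['expected'])
    return test_template


for case in [
    {'name': 'small', 'a': 1, 'b': 2, 'expected': 3},
    {'name': 'zero', 'a': 0, 'b': 0, 'expected': 0},
]:
    test_method = generator(case)
    test_method.__name__ = str('test_' + case['name'])
    setattr(TestArithmetic, test_method.__name__, test_method)
    del test_method  # keep the module namespace clean, as test_download.py does

if __name__ == '__main__':
    unittest.main()
```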
**test/test_execution.py**

```diff
@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
 import unittest
 
 import sys
@@ -6,17 +9,19 @@ import subprocess
 
 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
+
 try:
     _DEV_NULL = subprocess.DEVNULL
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')
 
+
 class TestExecution(unittest.TestCase):
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
-        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
+        if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution
             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
     def test_main_exec(self):
```
**test/test_subtitles.py**

```diff
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
@@ -22,6 +23,7 @@ from youtube_dl.extractor import (
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -74,7 +76,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
 
     def test_youtube_list_subtitles(self):
-        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
+        self.DL.expect_warning('Video doesn\'t have automatic captions')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
@@ -87,7 +89,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertTrue(subtitles['it'] is not None)
 
     def test_youtube_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'n5BB19UTcdA'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -101,7 +103,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestDailymotionSubtitles(BaseTestSubtitles):
@@ -130,20 +132,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertEqual(len(subtitles.keys()), 5)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -156,7 +158,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestTedSubtitles(BaseTestSubtitles):
@@ -185,13 +187,13 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.assertTrue(len(subtitles.keys()) >= 28)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
@@ -203,7 +205,7 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestBlipTVSubtitles(BaseTestSubtitles):
@@ -211,13 +213,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles):
     IE = BlipTVIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -251,20 +253,20 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vimeo.com/56015672'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
@@ -277,7 +279,7 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestWallaSubtitles(BaseTestSubtitles):
@@ -285,13 +287,13 @@ class TestWallaSubtitles(BaseTestSubtitles):
     IE = WallaIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -299,7 +301,7 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
```

@@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os

@@ -9,14 +9,13 @@ rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

IGNORED_FILES = [
    'setup.py',  # http://bugs.python.org/issue13943
    'conf.py',
    'buildserver.py',
]


class TestUnicodeLiterals(unittest.TestCase):
    def test_all_files(self):
        print('Skipping this test (not yet fully implemented)')
        return

        for dirpath, _, filenames in os.walk(rootDir):
            for basename in filenames:
                if not basename.endswith('.py'):
@@ -30,10 +29,10 @@ class TestUnicodeLiterals(unittest.TestCase):

                if "'" not in code and '"' not in code:
                    continue
                imps = 'from __future__ import unicode_literals'
                self.assertTrue(
                    imps in code,
                    ' %s  missing in %s' % (imps, fn))
                self.assertRegexpMatches(
                    code,
                    r'(?:#.*\n*)?from __future__ import (?:[a-z_]+,\s*)*unicode_literals',
                    'unicode_literals import  missing in %s' % fn)

                m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
                if m is not None:

@@ -45,8 +45,9 @@ from youtube_dl.utils import (
    escape_rfc3986,
    escape_url,
    js_to_json,
    get_filesystem_encoding,
    intlist_to_bytes,
    args_to_str,
    parse_filesize,
)


@@ -119,16 +120,16 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        #keep the list ordered
        # keep the list ordered
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

    def test_unescape_html(self):
        self.assertEqual(unescapeHTML('%20;'), '%20;')
        self.assertEqual(
            unescapeHTML('&eacute;'), 'é')


    def test_daterange(self):
        _20century = DateRange("19000101","20000101")
        _20century = DateRange("19000101", "20000101")
        self.assertFalse("17890714" in _20century)
        _ac = DateRange("00010101")
        self.assertTrue("19690721" in _ac)
@@ -170,7 +171,7 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')

    def test_smuggle_url(self):
        data = {u"ö": u"ö", u"abc": [3]}
        data = {"ö": "ö", "abc": [3]}
        url = 'https://foo.bar/baz?x=y#a'
        smug_url = smuggle_url(url, data)
        unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@@ -218,6 +219,7 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(parse_duration('0m0s'), 0)
        self.assertEqual(parse_duration('0s'), 0)
        self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
        self.assertEqual(parse_duration('T30M38S'), 1838)

    def test_fix_xml_ampersands(self):
        self.assertEqual(
@@ -360,5 +362,20 @@ class TestUtil(unittest.TestCase):
            intlist_to_bytes([0, 1, 127, 128, 255]),
            b'\x00\x01\x7f\x80\xff')

    def test_args_to_str(self):
        self.assertEqual(
            args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
            'foo ba/r -baz \'2 be\' \'\''
        )

    def test_parse_filesize(self):
        self.assertEqual(parse_filesize(None), None)
        self.assertEqual(parse_filesize(''), None)
        self.assertEqual(parse_filesize('91 B'), 91)
        self.assertEqual(parse_filesize('foobar'), None)
        self.assertEqual(parse_filesize('2 MiB'), 2097152)
        self.assertEqual(parse_filesize('5 GB'), 5000000000)
        self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)

if __name__ == '__main__':
    unittest.main()

@@ -1,5 +1,6 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
@@ -31,19 +32,18 @@ params = get_params({
})



TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']


class TestAnnotations(unittest.TestCase):
    def setUp(self):
        # Clear old files
        self.tearDown()


    def test_info_json(self):
        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
        ie = youtube_dl.extractor.YoutubeIE()
        ydl = YoutubeDL(params)
        ydl.add_info_extractor(ie)
@@ -51,7 +51,7 @@ class TestAnnotations(unittest.TestCase):
        self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
        annoxml = None
        with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
                annoxml = xml.etree.ElementTree.parse(annof)
            annoxml = xml.etree.ElementTree.parse(annof)
        self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
        root = annoxml.getroot()
        self.assertEqual(root.tag, 'document')
@@ -59,18 +59,17 @@ class TestAnnotations(unittest.TestCase):
        self.assertEqual(annotationsTag.tag, 'annotations')
        annotations = annotationsTag.findall('annotation')

        #Not all the annotations have TEXT children and the annotations are returned unsorted.
        # Not all the annotations have TEXT children and the annotations are returned unsorted.
        for a in annotations:
                self.assertEqual(a.tag, 'annotation')
                if a.get('type') == 'text':
                        textTag = a.find('TEXT')
                        text = textTag.text
                        self.assertTrue(text in expected) #assertIn only added in python 2.7
                        #remove the first occurance, there could be more than one annotation with the same text
                        expected.remove(text)
        #We should have seen (and removed) all the expected annotation texts.
            self.assertEqual(a.tag, 'annotation')
            if a.get('type') == 'text':
                textTag = a.find('TEXT')
                text = textTag.text
                self.assertTrue(text in expected)  # assertIn only added in python 2.7
                # remove the first occurance, there could be more than one annotation with the same text
                expected.remove(text)
        # We should have seen (and removed) all the expected annotation texts.
        self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')


    def tearDown(self):
        try_rm(ANNOTATIONS_FILE)

@@ -1,5 +1,6 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals

# Allow direct execution
import os
@@ -32,7 +33,7 @@ params = get_params({
TEST_ID = 'BaW_jenozKc'
INFO_JSON_FILE = TEST_ID + '.info.json'
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐
EXPECTED_DESCRIPTION = '''test chars:  "'/\ä↭𝕐
test URL: https://github.com/rg3/youtube-dl/issues/1892

This is a test video for youtube-dl.
@@ -53,11 +54,11 @@ class TestInfoJSON(unittest.TestCase):
        self.assertTrue(os.path.exists(INFO_JSON_FILE))
        with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
            jd = json.load(jsonf)
        self.assertEqual(jd['upload_date'], u'20121002')
        self.assertEqual(jd['upload_date'], '20121002')
        self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
        self.assertEqual(jd['id'], TEST_ID)
        self.assertEqual(jd['extractor'], 'youtube')
        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
        self.assertEqual(jd['title'], '''youtube-dl test video "'/\ä↭𝕐''')
        self.assertEqual(jd['uploader'], 'Philipp Hagemeister')

        self.assertTrue(os.path.exists(DESCRIPTION_FILE))

@@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals

# Allow direct execution
import os
@@ -12,10 +13,6 @@ from test.helper import FakeYDL
from youtube_dl.extractor import (
    YoutubePlaylistIE,
    YoutubeIE,
    YoutubeChannelIE,
    YoutubeShowIE,
    YoutubeTopListIE,
    YoutubeSearchURLIE,
)


@@ -31,7 +28,7 @@ class TestYoutubeLists(unittest.TestCase):
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertEqual(result['_type'], 'url')
        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')


    def test_youtube_course(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)

@@ -60,6 +60,7 @@ from .utils import (
    write_string,
    YoutubeDLHandler,
    prepend_extension,
    args_to_str,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
@@ -253,6 +254,22 @@ class YoutubeDL(object):
            self.print_debug_header()
            self.add_default_info_extractors()

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['youtube-dl'] +
                [a for i, a in enumerate(argv) if i not in idxs] +
                ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
@@ -297,7 +314,7 @@ class YoutubeDL(object):
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                       for _ in range(line_count))
                      for _ in range(line_count))
        return res[:-len('\n')]

    def to_screen(self, message, skip_eol=False):
@@ -534,7 +551,7 @@ class YoutubeDL(object):

            try:
                ie_result = ie.extract(url)
                if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                    break
                if isinstance(ie_result, list):
                    # Backwards compatibility: old IE result format
@@ -547,7 +564,7 @@ class YoutubeDL(object):
                    return self.process_ie_result(ie_result, download, extra_info)
                else:
                    return ie_result
            except ExtractorError as de: # An error we somewhat expected
            except ExtractorError as de:  # An error we somewhat expected
                self.report_error(compat_str(de), de.format_traceback())
                break
            except MaxDownloadsReached:
@@ -624,7 +641,7 @@ class YoutubeDL(object):

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type == 'playlist':
        elif result_type == 'playlist' or result_type == 'multi_video':
            # We process each entry in the playlist
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
            self.to_screen('[download] Downloading playlist: %s' % playlist)
@@ -679,14 +696,20 @@ class YoutubeDL(object):
            ie_result['entries'] = playlist_results
            return ie_result
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r,
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    })
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
@@ -836,14 +859,14 @@ class YoutubeDL(object):
                        # Two formats have been requested like '137+139'
                        format_1, format_2 = rf.split('+')
                        formats_info = (self.select_format(format_1, formats),
                            self.select_format(format_2, formats))
                                        self.select_format(format_2, formats))
                        if all(formats_info):
                            # The first format must contain the video and the
                            # second the audio
                            if formats_info[0].get('vcodec') == 'none':
                                self.report_error('The first format must '
                                    'contain the video, try using '
                                    '"-f %s+%s"' % (format_2, format_1))
                                                  'contain the video, try using '
                                                  '"-f %s+%s"' % (format_2, format_1))
                                return
                            selected_format = {
                                'requested_formats': formats_info,
@@ -989,7 +1012,7 @@ class YoutubeDL(object):
                    else:
                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                                subfile.write(sub)
                            subfile.write(sub)
                except (OSError, IOError):
                    self.report_error('Cannot write subtitles file ' + sub_filename)
                    return
@@ -1001,7 +1024,7 @@ class YoutubeDL(object):
            else:
                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                try:
                    write_json_file(info_dict, encodeFilename(infofn))
                    write_json_file(info_dict, infofn)
                except (OSError, IOError):
                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                    return
@@ -1021,10 +1044,10 @@ class YoutubeDL(object):
                        with open(thumb_filename, 'wb') as thumbf:
                            shutil.copyfileobj(uf, thumbf)
                        self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                            (info_dict['extractor'], info_dict['id'], thumb_filename))
                                       (info_dict['extractor'], info_dict['id'], thumb_filename))
                    except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                        self.report_warning('Unable to download thumbnail "%s": %s' %
                            (info_dict['thumbnail'], compat_str(err)))
                                            (info_dict['thumbnail'], compat_str(err)))

        if not self.params.get('skip_download', False):
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -1045,8 +1068,8 @@ class YoutubeDL(object):
                        if not merger._executable:
                            postprocessors = []
                            self.report_warning('You have requested multiple '
                                'formats but ffmpeg or avconv are not installed.'
                                ' The formats won\'t be merged')
                                                'formats but ffmpeg or avconv are not installed.'
                                                ' The formats won\'t be merged')
                        else:
                            postprocessors = [merger]
                        for f in info_dict['requested_formats']:
@@ -1090,7 +1113,7 @@ class YoutubeDL(object):

        for url in url_list:
            try:
                #It also downloads the videos
                # It also downloads the videos
                res = self.extract_info(url)
            except UnavailableVideoError:
                self.report_error('unable to download video')
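
The warn_if_short_id hunk above adds a guard for a common CLI pitfall: a bare 11-character video ID that starts with a dash is indistinguishable from an option, so the option parser swallows it. The helper spots such arguments and suggests re-running with "--" separating options from URLs. A minimal standalone sketch of the same logic (suggest_separator is a hypothetical name and the ID below is only illustrative):

    import re

    def suggest_separator(argv):
        # Arguments that look like a dash followed by 10 ID characters,
        # i.e. an 11-character YouTube ID that parses as an option.
        idxs = [i for i, a in enumerate(argv)
                if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if not idxs:
            return None
        # Rebuild the command line with '--' so everything after it is
        # treated as a URL/ID rather than an option.
        return (['youtube-dl']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs])

    print(suggest_separator(['-f', 'best', '-wNyGUQIQ9g']))
    # ['youtube-dl', '-f', 'best', '--', '-wNyGUQIQ9g']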

@@ -1,6 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

__license__ = 'Public Domain'

import codecs
@@ -17,6 +19,7 @@ from .compat import (
    compat_expanduser,
    compat_getpass,
    compat_print,
    workaround_optparse_bug9161,
)
from .utils import (
    DateRange,
@@ -55,7 +58,9 @@ def _real_main(argv=None):
        # https://github.com/rg3/youtube-dl/issues/820
        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)

    setproctitle(u'youtube-dl')
    workaround_optparse_bug9161()

    setproctitle('youtube-dl')

    parser, opts, args = parseOpts(argv)

@@ -71,10 +76,10 @@ def _real_main(argv=None):
    if opts.headers is not None:
        for h in opts.headers:
            if h.find(':', 1) < 0:
                parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
            key, value = h.split(':', 2)
            if opts.verbose:
                write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
            std_headers[key] = value

    # Dump user agent
@@ -92,9 +97,9 @@ def _real_main(argv=None):
                batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
            batch_urls = read_batch_urls(batchfd)
            if opts.verbose:
                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
            sys.exit('ERROR: batch file could not be read')
    all_urls = batch_urls + args
    all_urls = [url.strip() for url in all_urls]
    _enc = preferredencoding()
@@ -107,7 +112,7 @@ def _real_main(argv=None):
            compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                compat_print(u'  ' + mu)
                compat_print('  ' + mu)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
@@ -117,63 +122,62 @@ def _real_main(argv=None):
            if desc is False:
                continue
            if hasattr(ie, 'SEARCH_KEY'):
                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
                _COUNTS = (u'', u'5', u'10', u'all')
                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny')
                _COUNTS = ('', '5', '10', 'all')
                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
            compat_print(desc)
        sys.exit(0)


    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
        parser.error('using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u'account username missing\n')
        parser.error('account username missing\n')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
        parser.error('using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
        parser.error('using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = compat_getpass(u'Type account password and press [Return]: ')
        opts.password = compat_getpass('Type account password and press [Return]: ')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
            parser.error('invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error(u'invalid min_filesize specified')
            parser.error('invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error(u'invalid max_filesize specified')
            parser.error('invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError):
            parser.error(u'invalid retry count specified')
            parser.error('invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
            parser.error('invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    if opts.playliststart <= 0:
        raise ValueError(u'Playlist start must be positive')
        raise ValueError('Playlist start must be positive')
    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
        raise ValueError(u'Playlist end must be greater than playlist start')
        raise ValueError('Playlist end must be greater than playlist start')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error(u'invalid audio format specified')
            parser.error('invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')
            parser.error('invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
            parser.error(u'invalid video recode format specified')
            parser.error('invalid video recode format specified')
    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
@@ -185,25 +189,25 @@ def _real_main(argv=None):

    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and (opts.writeautomaticsub == False):
    if opts.allsubtitles and not opts.writeautomaticsub:
        opts.writesubtitles = True

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
            or (opts.useid and u'%(id)s.%(ext)s')
            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
            or DEFAULT_OUTTMPL)
    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
               or (opts.useid and '%(id)s.%(ext)s')
               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
               or DEFAULT_OUTTMPL)
    if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
        parser.error(u'Cannot download a video and extract audio into the same'
                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
                     u' template'.format(outtmpl))
        parser.error('Cannot download a video and extract audio into the same'
                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
                     ' template'.format(outtmpl))

    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
    download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
@@ -312,7 +316,6 @@ def _real_main(argv=None):
                ydl.add_post_processor(FFmpegAudioFixPP())
            ydl.add_post_processor(AtomicParsleyPP())


        # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
        # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
        if opts.exec_cmd:
@@ -329,18 +332,19 @@ def _real_main(argv=None):

        # Maybe do nothing
        if (len(all_urls) < 1) and (opts.load_info_filename is None):
            if not (opts.update_self or opts.rm_cachedir):
                parser.error(u'you must provide at least one URL')
            else:
            if opts.update_self or opts.rm_cachedir:
                sys.exit()

            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
            parser.error('you must provide at least one URL')

        try:
            if opts.load_info_filename is not None:
                retcode = ydl.download_with_info_file(opts.load_info_filename)
            else:
                retcode = ydl.download(all_urls)
        except MaxDownloadsReached:
            ydl.to_screen(u'--max-download limit reached, aborting.')
            ydl.to_screen('--max-download limit reached, aborting.')
            retcode = 101

    sys.exit(retcode)
@@ -352,6 +356,6 @@ def main(argv=None):
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
        sys.exit('ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')
        sys.exit('\nERROR: Interrupted by user')
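
The outtmpl assignment above is an or-chain that leans on Python returning the first truthy operand of "or": each "(flag and template)" term yields its template string only when the flags are set, so the first matching combination wins and DEFAULT_OUTTMPL is the final fallback. A small self-contained illustration (the Opts class and flag values here are hypothetical):

    class Opts(object):
        outtmpl = None      # no explicit template given
        format = '-1'
        usetitle = True
        autonumber = False
        useid = False

    opts = Opts()
    DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
               or DEFAULT_OUTTMPL)
    print(outtmpl)  # -> %(title)s-%(id)s-%(format)s.%(ext)s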

@@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals

# Execute with
# $ python youtube_dl/__main__.py (2.6+)

@@ -1,3 +1,5 @@
from __future__ import unicode_literals

__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']

import base64
@@ -7,10 +9,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes

BLOCK_SIZE_BYTES = 16


def aes_ctr_decrypt(data, key, counter):
    """
    Decrypt with aes in counter mode


    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {instance} counter  Instance whose next_value function (@returns {int[]}  16-Byte block)
@@ -19,23 +22,24 @@ def aes_ctr_decrypt(data, key, counter):
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data=[]

    decrypted_data = []
    for i in range(block_count):
        counter_block = counter.next_value()
        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
        block += [0]*(BLOCK_SIZE_BYTES - len(block))

        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        cipher_counter_block = aes_encrypt(counter_block, expanded_key)
        decrypted_data += xor(block, cipher_counter_block)
    decrypted_data = decrypted_data[:len(data)]


    return decrypted_data


def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode


    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
@@ -43,94 +47,98 @@ def aes_cbc_decrypt(data, key, iv):
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data=[]

    decrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
        block += [0]*(BLOCK_SIZE_BYTES - len(block))

        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        decrypted_block = aes_decrypt(block, expanded_key)
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    decrypted_data = decrypted_data[:len(data)]


    return decrypted_data


def key_expansion(data):
    """
    Generate key schedule


    @param {int[]} data  16/24/32-Byte cipher key
    @returns {int[]}     176/208/240-Byte expanded key
    """
    data = data[:] # copy
    data = data[:]  # copy
    rcon_iteration = 1
    key_size_bytes = len(data)
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES


    while len(data) < expanded_key_size_bytes:
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])

        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        for _ in range(3):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])

            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        if key_size_bytes == 32:
            temp = data[-4:]
            temp = sub_bytes(temp)
            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])

        for _ in range(3 if key_size_bytes == 32  else 2 if key_size_bytes == 24 else 0):
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    data = data[:expanded_key_size_bytes]


    return data


def aes_encrypt(data, expanded_key):
    """
    Encrypt one block with aes


    @param {int[]} data          16-Byte state
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte cipher
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
    for i in range(1, rounds+1):
    for i in range(1, rounds + 1):
        data = sub_bytes(data)
        data = shift_rows(data)
        if i != rounds:
            data = mix_columns(data)
        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

    return data


def aes_decrypt(data, expanded_key):
    """
    Decrypt one block with aes


    @param {int[]} data          16-Byte cipher
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte state
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1


    for i in range(rounds, 0, -1):
        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
        if i != rounds:
            data = mix_columns_inv(data)
        data = shift_rows_inv(data)
        data = sub_bytes_inv(data)
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])


    return data


def aes_decrypt_text(data, password, key_size_bytes):
    """
    Decrypt text
@@ -138,33 +146,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
    - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
      with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
    - Mode of operation is 'counter'


    @param {str} data                    Base64 encoded string
    @param {str,unicode} password        Password (will be encoded with utf-8)
    @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
    @returns {str}                       Decrypted data
    """
    NONCE_LENGTH_BYTES = 8


    data = bytes_to_intlist(base64.b64decode(data))
    password = bytes_to_intlist(password.encode('utf-8'))

    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))

    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)


    nonce = data[:NONCE_LENGTH_BYTES]
    cipher = data[NONCE_LENGTH_BYTES:]


    class Counter:
        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)

        def next_value(self):
            temp = self.__value
            self.__value = inc(self.__value)
            return temp


    decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
    plaintext = intlist_to_bytes(decrypted_data)


    return plaintext

RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -200,14 +209,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
            0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
            0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
            0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
                     (0x1,0x2,0x3,0x1),
                     (0x1,0x1,0x2,0x3),
                     (0x3,0x1,0x1,0x2))
MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
                         (0x9,0xE,0xB,0xD),
                         (0xD,0x9,0xE,0xB),
                         (0xB,0xD,0x9,0xE))
MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
                     (0x1, 0x2, 0x3, 0x1),
                     (0x1, 0x1, 0x2, 0x3),
                     (0x3, 0x1, 0x1, 0x2))
MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
                         (0x9, 0xE, 0xB, 0xD),
                         (0xD, 0x9, 0xE, 0xB),
                         (0xB, 0xD, 0x9, 0xE))
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                      0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                      0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +250,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                      0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                      0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)


def sub_bytes(data):
    return [SBOX[x] for x in data]


def sub_bytes_inv(data):
    return [SBOX_INV[x] for x in data]


def rotate(data):
    return data[1:] + [data[0]]


def key_schedule_core(data, rcon_iteration):
    data = rotate(data)
    data = sub_bytes(data)
    data[0] = data[0] ^ RCON[rcon_iteration]


    return data


def xor(data1, data2):
    return [x^y for x, y in zip(data1, data2)]
    return [x ^ y for x, y in zip(data1, data2)]


def rijndael_mul(a, b):
    if(a==0 or b==0):
    if(a == 0 or b == 0):
        return 0
    return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]


def mix_column(data, matrix):
    data_mixed = []
    for row in range(4):
@@ -275,33 +291,38 @@ def mix_column(data, matrix):
        data_mixed.append(mixed)
    return data_mixed


def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
    data_mixed = []
    for i in range(4):
        column = data[i*4 : (i+1)*4]
        column = data[i * 4: (i + 1) * 4]
        data_mixed += mix_column(column, matrix)
    return data_mixed


def mix_columns_inv(data):
    return mix_columns(data, MIX_COLUMN_MATRIX_INV)


def shift_rows(data):
    data_shifted = []
    for column in range(4):
        for row in range(4):
            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
    return data_shifted


def shift_rows_inv(data):
    data_shifted = []
    for column in range(4):
        for row in range(4):
            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
    return data_shifted


def inc(data):
    data = data[:] # copy
    for i in range(len(data)-1,-1,-1):
    data = data[:]  # copy
    for i in range(len(data) - 1, -1, -1):
        if data[i] == 255:
            data[i] = 0
        else:
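
The AES changes above are purely stylistic (PEP 8 spacing), so the module's behaviour is unchanged: aes_encrypt and aes_decrypt remain inverses over a single 16-byte block once the key has been run through key_expansion. A quick round-trip check, assuming youtube_dl is importable (the key and block values below are arbitrary):

    from youtube_dl.aes import aes_decrypt, aes_encrypt, key_expansion

    key = list(range(16))          # 16-byte key -> AES-128
    block = [0x41] * 16            # one 16-byte state, as a list of ints
    expanded = key_expansion(key)  # 176-byte key schedule for AES-128
    cipher = aes_encrypt(block, expanded)
    assert aes_decrypt(cipher, expanded) == block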
@@ -8,7 +8,7 @@ import re
 import shutil
 import traceback
 
-from .compat import compat_expanduser
+from .compat import compat_expanduser, compat_getenv
 from .utils import write_json_file
 
 
@@ -19,7 +19,7 @@ class Cache(object):
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'youtube-dl')
         return compat_expanduser(res)

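This hunk (apparently the cache module) only swaps os.environ.get for compat_getenv, which handles environment variable decoding on old Pythons; the lookup order itself is unchanged. A sketch with plain stdlib stand-ins, where os.environ.get and os.path.expanduser stand in for compat_getenv and compat_expanduser:

    import os

    def cache_root_dir(params):
        # explicit cachedir option first, then $XDG_CACHE_HOME,
        # then the ~/.cache default, expanded at the end
        res = params.get('cachedir')
        if res is None:
            cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
            res = os.path.join(cache_root, 'youtube-dl')
        return os.path.expanduser(res)

    print(cache_root_dir({}))  # e.g. /home/user/.cache/youtube-dl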
@@ -1,54 +1,56 @@
 from __future__ import unicode_literals
 
 import getpass
+import optparse
 import os
+import re
 import subprocess
 import sys
 
 
 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request
 
 try:
     import urllib.error as compat_urllib_error
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_error
 
 try:
     import urllib.parse as compat_urllib_parse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib as compat_urllib_parse
 
 try:
     from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     from urlparse import urlparse as compat_urllib_parse_urlparse
 
 try:
     import urllib.parse as compat_urlparse
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urlparse as compat_urlparse
 
 try:
     import http.cookiejar as compat_cookiejar
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import cookielib as compat_cookiejar
 
 try:
     import html.entities as compat_html_entities
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import htmlentitydefs as compat_html_entities
 
 try:
     import html.parser as compat_html_parser
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import HTMLParser as compat_html_parser
 
 try:
     import http.client as compat_http_client
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import httplib as compat_http_client
 
 try:
@@ -109,12 +111,12 @@ except ImportError:
 
 try:
     from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
+except ImportError:  # Python 2
     # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
     # Python 2's version is apparently totally broken
 
     def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
+                   encoding='utf-8', errors='replace'):
         qs, _coerce_result = qs, unicode
         pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
         r = []
@@ -143,10 +145,10 @@ except ImportError: # Python 2
         return r
 
     def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                encoding='utf-8', errors='replace'):
+                        encoding='utf-8', errors='replace'):
         parsed_result = {}
         pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                        encoding=encoding, errors=errors)
+                           encoding=encoding, errors=errors)
         for name, value in pairs:
             if name in parsed_result:
                 parsed_result[name].append(value)
@@ -155,12 +157,12 @@ except ImportError: # Python 2
         return parsed_result
 
 try:
-    compat_str = unicode # Python 2
+    compat_str = unicode  # Python 2
 except NameError:
     compat_str = str
 
 try:
-    compat_chr = unichr # Python 2
+    compat_chr = unichr  # Python 2
 except NameError:
     compat_chr = chr
 
@@ -173,12 +175,17 @@ try:
     from shlex import quote as shlex_quote
 except ImportError:  # Python < 3.3
     def shlex_quote(s):
-        return "'" + s.replace("'", "'\"'\"'") + "'"
+        if re.match(r'^[-_\w./]+$', s):
+            return s
+        else:
+            return "'" + s.replace("'", "'\"'\"'") + "'"
 
 
 def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
+    if type(c) is int:
+        return c
+    else:
+        return ord(c)
 
 
 if sys.version_info >= (3, 0):
@@ -249,7 +256,7 @@ else:
                     drive = ''
                 userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
 
-            if i != 1: #~user
+            if i != 1:  # ~user
                 userhome = os.path.join(os.path.dirname(userhome), path[1:i])
 
             return userhome + path[i:]
@@ -263,7 +270,7 @@ if sys.version_info < (3, 0):
         print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
 else:
     def compat_print(s):
-        assert type(s) == type(u'')
+        assert isinstance(s, compat_str)
         print(s)
 
 
@@ -288,6 +295,36 @@ if sys.version_info < (3, 0) and sys.platform == 'win32':
 else:
     compat_getpass = getpass.getpass
 
+# Old 2.6 and 2.7 releases require kwargs to be bytes
+try:
+    (lambda x: x)(**{'x': 0})
+except TypeError:
+    def compat_kwargs(kwargs):
+        return dict((bytes(k), v) for k, v in kwargs.items())
+else:
+    compat_kwargs = lambda kwargs: kwargs
+
+
+# Fix https://github.com/rg3/youtube-dl/issues/4223
+# See http://bugs.python.org/issue9161 for what is broken
+def workaround_optparse_bug9161():
+    op = optparse.OptionParser()
+    og = optparse.OptionGroup(op, 'foo')
+    try:
+        og.add_option('-t')
+    except TypeError:
+        real_add_option = optparse.OptionGroup.add_option
+
+        def _compat_add_option(self, *args, **kwargs):
+            enc = lambda v: (
+                v.encode('ascii', 'replace') if isinstance(v, compat_str)
+                else v)
+            bargs = [enc(a) for a in args]
+            bkwargs = dict(
+                (k, enc(v)) for k, v in kwargs.items())
+            return real_add_option(self, *bargs, **bkwargs)
+        optparse.OptionGroup.add_option = _compat_add_option
+
+
 __all__ = [
     'compat_HTTPError',
@@ -299,6 +336,7 @@ __all__ = [
     'compat_html_entities',
     'compat_html_parser',
     'compat_http_client',
+    'compat_kwargs',
     'compat_ord',
     'compat_parse_qs',
     'compat_print',
@@ -314,4 +352,5 @@ __all__ = [
     'compat_xml_parse_error',
     'shlex_quote',
     'subprocess_check_output',
+    'workaround_optparse_bug9161',
 ]

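Besides the style pass, this file (apparently the compat module) gains two behavioural changes: shlex_quote now leaves shell-safe names unquoted, and compat_kwargs plus workaround_optparse_bug9161 are introduced for old Python 2 releases. The quoting change is self-contained enough to demo directly; this sketch simply replays the new fallback definition:

    import re

    def shlex_quote(s):
        # safe filenames pass through untouched; anything else is wrapped
        # in single quotes, with embedded quotes spliced in as '"'"'
        if re.match(r'^[-_\w./]+$', s):
            return s
        else:
            return "'" + s.replace("'", "'\"'\"'") + "'"

    print(shlex_quote('video.mp4'))    # video.mp4
    print(shlex_quote("it's a test"))  # 'it'"'"'s a test'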
@@ -30,3 +30,8 @@ def get_suitable_downloader(info_dict):
         return F4mFD
     else:
         return HttpFD
+
+__all__ = [
+    'get_suitable_downloader',
+    'FileDownloader',
+]

@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 import sys
@@ -79,7 +81,7 @@ class FileDownloader(object):
         if total is None:
             return None
         dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
+        if current == 0 or dif < 0.001:  # One millisecond
             return None
         rate = float(current) / dif
         return int((float(total) - float(current)) / rate)
@@ -93,7 +95,7 @@ class FileDownloader(object):
     @staticmethod
     def calc_speed(start, now, bytes):
         dif = now - start
-        if bytes == 0 or dif < 0.001: # One millisecond
+        if bytes == 0 or dif < 0.001:  # One millisecond
             return None
         return float(bytes) / dif
 
@@ -106,7 +108,7 @@ class FileDownloader(object):
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
-        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
         if elapsed_time < 0.001:
             return int(new_max)
         rate = bytes / elapsed_time
@@ -159,14 +161,14 @@ class FileDownloader(object):
 
     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
+        if self.params.get('nopart', False) or filename == '-' or \
                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
             return filename
-        return filename + u'.part'
+        return filename + '.part'
 
     def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
+        if filename.endswith('.part'):
+            return filename[:-len('.part')]
         return filename
 
     def try_rename(self, old_filename, new_filename):
@@ -175,7 +177,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.report_error(u'unable to rename file: %s' % compat_str(err))
+            self.report_error('unable to rename file: %s' % compat_str(err))
 
     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -200,10 +202,10 @@ class FileDownloader(object):
 
     def report_destination(self, filename):
         """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
+        self.to_screen('[download] Destination: ' + filename)
 
     def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
+        fullmsg = '[download] ' + msg
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
@@ -211,13 +213,13 @@ class FileDownloader(object):
                 prev_len = getattr(self, '_report_progress_prev_line_length',
                                    0)
                 if prev_len > len(fullmsg):
-                    fullmsg += u' ' * (prev_len - len(fullmsg))
+                    fullmsg += ' ' * (prev_len - len(fullmsg))
                 self._report_progress_prev_line_length = len(fullmsg)
-                clear_line = u'\r'
+                clear_line = '\r'
             else:
-                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
             self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
+        self.to_console_title('youtube-dl ' + msg)
 
     def report_progress(self, percent, data_len_str, speed, eta):
         """Report download progress."""
@@ -233,7 +235,7 @@ class FileDownloader(object):
             percent_str = 'Unknown %'
         speed_str = self.format_speed(speed)
 
-        msg = (u'%s of %s at %s ETA %s' %
+        msg = ('%s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str))
         self._report_progress_status(msg)
 
@@ -243,37 +245,37 @@ class FileDownloader(object):
         downloaded_str = format_bytes(downloaded_data_len)
         speed_str = self.format_speed(speed)
         elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
+        msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
         self._report_progress_status(msg)
 
     def report_finish(self, data_len_str, tot_time):
         """Report download finished."""
         if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
+            self.to_screen('[download] Download completed')
         else:
             self._report_progress_status(
-                (u'100%% of %s in %s' %
+                ('100%% of %s in %s' %
                  (data_len_str, self.format_seconds(tot_time))),
                 is_last_line=True)
 
     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+        self.to_screen('[download] Resuming download at byte %s' % resume_len)
 
     def report_retry(self, count, retries):
         """Report retry in case of HTTP error 5xx"""
-        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
 
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')
 
     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
-        self.to_screen(u'[download] Unable to resume')
+        self.to_screen('[download] Unable to resume')
 
     def download(self, filename, info_dict):
         """Download to a filename using the info from info_dict
@@ -293,7 +295,7 @@ class FileDownloader(object):
 
     def real_download(self, filename, info_dict):
         """Real download process. Redefine in subclasses."""
-        raise NotImplementedError(u'This method must be implemented by subclasses')
+        raise NotImplementedError('This method must be implemented by subclasses')
 
     def _hook_progress(self, status):
         for ph in self._progress_hooks:

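best_block_size above aims each read at roughly one second of wall time. The hunk cuts off after computing rate, so the clamping tail in this sketch is filled in from the surrounding upstream code and should be read as an assumption:

    def best_block_size(elapsed_time, bytes_read):
        new_min = max(bytes_read / 2.0, 1.0)
        new_max = min(max(bytes_read * 2.0, 1.0), 4194304)  # 4 MB cap
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes_read / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    # the last read moved 1024 bytes in 0.5 s, so the next block doubles:
    print(best_block_size(0.5, 1024))  # 2048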
@@ -55,7 +55,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size - header_end)
 
     def read_asrt(self):
         # version
@@ -180,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
 
@@ -225,13 +225,15 @@ class F4mFD(FileDownloader):
         self.to_screen('[download] Downloading f4m manifest')
         manifest = self.ydl.urlopen(man_url).read()
         self.report_destination(filename)
-        http_dl = HttpQuietDownloader(self.ydl,
+        http_dl = HttpQuietDownloader(
+            self.ydl,
             {
                 'continuedl': True,
                 'quiet': True,
                 'noprogress': True,
                 'test': self.params.get('test', False),
-            })
+            }
+        )
 
         doc = etree.fromstring(manifest)
         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
@@ -277,7 +279,7 @@ class F4mFD(FileDownloader):
         def frag_progress_hook(status):
             frag_total_bytes = status.get('total_bytes', 0)
             estimated_size = (state['downloaded_bytes'] +
-                (total_frags - state['frag_counter']) * frag_total_bytes)
+                              (total_frags - state['frag_counter']) * frag_total_bytes)
             if status['status'] == 'finished':
                 state['downloaded_bytes'] += frag_total_bytes
                 state['frag_counter'] += 1
@@ -287,13 +289,13 @@ class F4mFD(FileDownloader):
                 frag_downloaded_bytes = status['downloaded_bytes']
                 byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                 frag_progress = self.calc_percent(frag_downloaded_bytes,
-                    frag_total_bytes)
+                                                  frag_total_bytes)
                 progress = self.calc_percent(state['frag_counter'], total_frags)
                 progress += frag_progress / float(total_frags)
 
             eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
             self.report_progress(progress, format_bytes(estimated_size),
-                status.get('speed'), eta)
+                                 status.get('speed'), eta)
         http_dl.add_progress_hook(frag_progress_hook)
 
         frags_filenames = []

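In build_fragments_list, zipping a bounded range with itertools.count is what limits the otherwise infinite fragment counter to n_frags entries. A standalone replay of that loop with invented sample values:

    import itertools

    n_frags = 5
    first_frag_number = 12
    res = []
    for (i, frag_number) in zip(range(1, n_frags + 1),
                                itertools.count(first_frag_number)):
        res.append((1, frag_number))  # (segment, fragment) pairs
    print(res)  # [(1, 12), (1, 13), (1, 14), (1, 15), (1, 16)]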
@@ -28,14 +28,14 @@ class HlsFD(FileDownloader):
             if check_executable(program, ['-version']):
                 break
         else:
-            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
             return False
         cmd = [program] + args
 
         retval = subprocess.call(cmd)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (cmd[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -45,8 +45,8 @@ class HlsFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'%s exited with code %d' % (program, retval))
+            self.to_stderr('\n')
+            self.report_error('%s exited with code %d' % (program, retval))
             return False
 
 
@@ -101,4 +101,3 @@ class NativeHlsFD(FileDownloader):
         })
         self.try_rename(tmpfilename, filename)
         return True
-

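The ffmpeg/avconv probe above depends on Python's for/else: the else branch runs only when the loop finishes without a break, i.e. when no candidate binary worked. A minimal sketch, where check_executable is a local stand-in for the helper the downloader imports from utils:

    import subprocess

    def check_executable(program, args):
        try:
            subprocess.call([program] + args,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            return False
        return True

    for program in ('avconv', 'ffmpeg'):
        if check_executable(program, ['-version']):
            break
    else:  # no break happened: neither binary could be run
        program = None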
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import time
 
@@ -106,7 +108,7 @@ class HttpFD(FileDownloader):
                 self.report_retry(count, retries)
 
         if count > retries:
-            self.report_error(u'giving up after %s retries' % retries)
+            self.report_error('giving up after %s retries' % retries)
             return False
 
         data_len = data.info().get('Content-length', None)
@@ -124,10 +126,10 @@ class HttpFD(FileDownloader):
             min_data_len = self.params.get("min_filesize", None)
             max_data_len = self.params.get("max_filesize", None)
             if min_data_len is not None and data_len < min_data_len:
-                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
+                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                 return False
             if max_data_len is not None and data_len > max_data_len:
-                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
+                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                 return False
 
         data_len_str = format_bytes(data_len)
@@ -151,13 +153,13 @@ class HttpFD(FileDownloader):
                     filename = self.undo_temp_name(tmpfilename)
                     self.report_destination(filename)
                 except (OSError, IOError) as err:
-                    self.report_error(u'unable to open for writing: %s' % str(err))
+                    self.report_error('unable to open for writing: %s' % str(err))
                     return False
             try:
                 stream.write(data_block)
             except (IOError, OSError) as err:
-                self.to_stderr(u"\n")
-                self.report_error(u'unable to write data: %s' % str(err))
+                self.to_stderr('\n')
+                self.report_error('unable to write data: %s' % str(err))
                 return False
             if not self.params.get('noresizebuffer', False):
                 block_size = self.best_block_size(after - before, len(data_block))
@@ -188,10 +190,10 @@ class HttpFD(FileDownloader):
             self.slow_down(start, byte_counter - resume_len)
 
         if stream is None:
-            self.to_stderr(u"\n")
-            self.report_error(u'Did not get any data blocks')
+            self.to_stderr('\n')
+            self.report_error('Did not get any data blocks')
             return False
-        if tmpfilename != u'-':
+        if tmpfilename != '-':
             stream.close()
         self.report_finish(data_len_str, (time.time() - start))
         if data_len is not None and byte_counter != data_len:

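The min/max filesize aborts above reduce to a small predicate; None means "no limit", mirroring the params.get defaults. A sketch:

    def filesize_ok(data_len, params):
        min_data_len = params.get('min_filesize', None)
        max_data_len = params.get('max_filesize', None)
        if min_data_len is not None and data_len < min_data_len:
            return False
        if max_data_len is not None and data_len > max_data_len:
            return False
        return True

    assert filesize_ok(5000, {'min_filesize': 1024})
    assert not filesize_ok(500, {'min_filesize': 1024})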
@@ -1,7 +1,10 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess
 
 from .common import FileDownloader
+from ..compat import compat_subprocess_get_DEVNULL
 from ..utils import (
     encodeFilename,
 )
@@ -13,19 +16,23 @@ class MplayerFD(FileDownloader):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)
 
-        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
+        args = [
+            'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
+            '-dumpstream', '-dumpfile', tmpfilename, url]
         # Check for mplayer first
         try:
-            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+            subprocess.call(
+                ['mplayer', '-h'],
+                stdout=compat_subprocess_get_DEVNULL(), stderr=subprocess.STDOUT)
         except (OSError, IOError):
-            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
+            self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0])
             return False
 
         # Download using mplayer.
         retval = subprocess.call(args)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -35,6 +42,6 @@ class MplayerFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'mplayer exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('mplayer exited with code %d' % retval)
             return False

@@ -46,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -72,7 +72,7 @@ class RtmpFD(FileDownloader):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
+                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -88,7 +88,7 @@ class RtmpFD(FileDownloader):
                         if not cursor_in_new_line:
                             self.to_screen('')
                         cursor_in_new_line = True
-                        self.to_screen('[rtmpdump] '+line)
+                        self.to_screen('[rtmpdump] ' + line)
             proc.wait()
             if not cursor_in_new_line:
                 self.to_screen('')
@@ -180,7 +180,7 @@ class RtmpFD(FileDownloader):
         while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen('[rtmpdump] %s bytes' % prevsize)
-            time.sleep(5.0) # This seems to be needed
+            time.sleep(5.0)  # This seems to be needed
             retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
             cursize = os.path.getsize(encodeFilename(tmpfilename))
             if prevsize == cursize and retval == RD_FAILED:

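The rtmpdump progress parsing relies on the kB/percent regex shown above; against a made-up sample line it recovers the byte count and completion percentage, from which the code also estimates the total size:

    import re

    line = '1234.567 kB / 45.67 sec (78.9%)'
    mobj = re.search(
        r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)',
        line)
    downloaded_data_len = int(float(mobj.group(1)) * 1024)  # kB -> bytes
    percent = float(mobj.group(2))
    data_len = int(downloaded_data_len * 100 / percent)     # estimated total
    print(downloaded_data_len, percent, data_len)  # 1264196 78.9 1602276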
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 from .abc import ABCIE
 from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
@@ -32,6 +34,7 @@ from .bilibili import BiliBiliIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
+from .bpb import BpbIE
 from .br import BRIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
@@ -115,6 +118,7 @@ from .fktv import (
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .folketinget import FolketingetIE
 from .fourtube import FourTubeIE
 from .franceculture import FranceCultureIE
 from .franceinter import FranceInterIE
@@ -371,6 +375,7 @@ from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .tagesschau import TagesschauIE
 from .tapely import TapelyIE
+from .tass import TassIE
 from .teachertube import (
     TeacherTubeIE,
     TeacherTubeUserIE,
@@ -379,6 +384,7 @@ from .teachingchannel import TeachingChannelIE
 from .teamcoco import TeamcocoIE
 from .techtalks import TechTalksIE
 from .ted import TEDIE
+from .telebruxelles import TeleBruxellesIE
 from .telecinco import TelecincoIE
 from .telemb import TeleMBIE
 from .tenplay import TenPlayIE
@@ -390,6 +396,7 @@ from .thesixtyone import TheSixtyOneIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
 from .tlc import TlcIE, TlcDeIE
+from .tmz import TMZIE
 from .tnaflix import TNAFlixIE
 from .thvideo import (
     THVideoIE,
@@ -403,6 +410,7 @@ from .trutube import TruTubeIE
 from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
+from .tunein import TuneInIE
 from .turbo import TurboIE
 from .tutv import TutvIE
 from .tvigle import TvigleIE
@@ -452,7 +460,10 @@ from .vine import (
     VineUserIE,
 )
 from .viki import VikiIE
-from .vk import VKIE
+from .vk import (
+    VKIE,
+    VKUserVideosIE,
+)
 from .vodlocker import VodlockerIE
 from .vporn import VpornIE
 from .vrt import VRTIE
@@ -476,6 +487,7 @@ from .wrzuta import WrzutaIE
 from .xbef import XBefIE
 from .xboxclips import XboxClipsIE
 from .xhamster import XHamsterIE
+from .xminus import XMinusIE
 from .xnxx import XNXXIE
 from .xvideos import XVideosIE
 from .xtube import XTubeUserIE, XTubeIE
@@ -506,6 +518,10 @@ from .youtube import (
     YoutubeWatchLaterIE,
 )
 from .zdf import ZDFIE
+from .zingmp3 import (
+    ZingMp3SongIE,
+    ZingMp3AlbumIE,
+)
 
 _ALL_CLASSES = [
     klass
@@ -524,4 +540,4 @@ def gen_extractors():
 
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']

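get_info_extractor at the bottom of this file resolves a class purely by naming convention: the requested name plus an IE suffix, looked up with globals() in the module namespace. A toy registry showing the same mechanism:

    class ExampleIE(object):
        pass

    def get_info_extractor(ie_name):
        # the class must exist in this namespace as <name>IE
        return globals()[ie_name + 'IE']

    assert get_info_extractor('Example') is ExampleIE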
@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -18,15 +19,14 @@ class AcademicEarthCourseIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        playlist_id = m.group('id')
+        playlist_id = self._match_id(url)
 
         webpage = self._download_webpage(url, playlist_id)
         title = self._html_search_regex(
-            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
         description = self._html_search_regex(
             r'<p class="excerpt"[^>]*?>(.*?)</p>',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
         urls = re.findall(
             r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
             webpage)

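This refactor, repeated in several extractors below, replaces the two-line re.match dance with _match_id, a helper on the common InfoExtractor that extracts the (?P<id>...) group from _VALID_URL. A minimal stand-in, with the URL and pattern invented for illustration:

    import re

    class SketchIE(object):
        _VALID_URL = r'https?://example\.com/playlists/(?P<id>[^?#/]+)'

        def _match_id(self, url):
            # the real helper in extractor/common.py also asserts the match
            return re.match(self._VALID_URL, url).group('id')

    print(SketchIE()._match_id('https://example.com/playlists/astronomy'))
    # astronomy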
@@ -15,8 +15,7 @@ from ..utils import (
 
 
 class AddAnimeIE(InfoExtractor):
-
-    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
+    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
     _TEST = {
         'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
         'md5': '72954ea10bc979ab5e2eb288b21425a0',
@@ -29,9 +28,9 @@ class AddAnimeIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
+        video_id = self._match_id(url)
+
         try:
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
             if not isinstance(ee.cause, compat_HTTPError) or \
@@ -49,7 +48,7 @@ class AddAnimeIE(InfoExtractor):
                 r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                 redir_webpage)
             if av is None:
-                raise ExtractorError(u'Cannot find redirect math task')
+                raise ExtractorError('Cannot find redirect math task')
             av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
 
             parsed_url = compat_urllib_parse_urlparse(url)

@@ -5,6 +5,7 @@ import re
 
 from .common import InfoExtractor
 
+
 class AdultSwimIE(InfoExtractor):
     _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
     _TEST = {

@@ -1,5 +1,4 @@
-#coding: utf-8
-
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -26,8 +25,7 @@ class AparatIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
 
         # Note: There is an easier-to-parse configuration at
         # http://www.aparat.com/video/video/config/videohash/%video_id
@@ -40,15 +38,15 @@ class AparatIE(InfoExtractor):
         for i, video_url in enumerate(video_urls):
             req = HEADRequest(video_url)
             res = self._request_webpage(
-                req, video_id, note=u'Testing video URL %d' % i, errnote=False)
+                req, video_id, note='Testing video URL %d' % i, errnote=False)
             if res:
                 break
         else:
-            raise ExtractorError(u'No working video URLs found')
+            raise ExtractorError('No working video URLs found')
 
-        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
+        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
         thumbnail = self._search_regex(
-            r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)
+            r'\s+image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
 
         return {
             'id': video_id,

@@ -70,15 +70,17 @@ class AppleTrailersIE(InfoExtractor):
         uploader_id = mobj.group('company')
 
         playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
+
         def fix_html(s):
             s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/
+
             def _clean_json(m):
                 return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)
-            s = '<html>' + s + u'</html>'
+            s = '<html>%s</html>' % s
             return s
         doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
 
@@ -86,7 +88,7 @@ class AppleTrailersIE(InfoExtractor):
         for li in doc.findall('./div/ul/li'):
             on_click = li.find('.//a').attrib['onClick']
             trailer_info_json = self._search_regex(self._JSON_RE,
-                on_click, 'trailer info')
+                                                   on_click, 'trailer info')
             trailer_info = json.loads(trailer_info_json)
             title = trailer_info['title']
             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()

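fix_html works because _clean_json re-escapes the raw single quotes inside the onClick handlers before the playlist HTML is fed to the XML parser (the &#39; entity in the hunk above had been decoded to a bare quote by the page scrape and is restored here). A replay under the assumption that _JSON_RE is the playURL-matching pattern; the exact regex lives elsewhere in the extractor:

    import re

    _JSON_RE = r'iTunes.playURL\((.*?)\);'

    def _clean_json(m):
        return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')

    s = 'onClick="iTunes.playURL({\'key\': \'val\'});"'
    print(re.sub(_JSON_RE, _clean_json, s))
    # onClick="iTunes.playURL({&#39;key&#39;: &#39;val&#39;});"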
@@ -192,4 +192,3 @@ class ARDIE(InfoExtractor):
             'upload_date': upload_date,
             'thumbnail': thumbnail,
         }
-

@@ -5,16 +5,15 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
     find_xpath_attr,
     unified_strdate,
-    determine_ext,
     get_element_by_id,
     get_element_by_attribute,
     int_or_none,
+    qualities,
 )
 
-# There are different sources of video in arte.tv, the extraction process 
+# There are different sources of video in arte.tv, the extraction process
 # is different for each one. The videos usually expire in 7 days, so we can't
 # add tests.
 
@@ -102,79 +101,54 @@ class ArteTVPlus7IE(InfoExtractor):
             'upload_date': unified_strdate(upload_date_str),
             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
         }
+        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
 
-        all_formats = []
+        formats = []
         for format_id, format_dict in player_info['VSR'].items():
-            fmt = dict(format_dict)
-            fmt['format_id'] = format_id
-            all_formats.append(fmt)
-        # Some formats use the m3u8 protocol
-        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
-        def _match_lang(f):
-            if f.get('versionCode') is None:
-                return True
-            # Return true if that format is in the language of the url
-            if lang == 'fr':
-                l = 'F'
-            elif lang == 'de':
-                l = 'A'
-            else:
-                l = lang
-            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
-            return any(re.match(r, f['versionCode']) for r in regexes)
-        # Some formats may not be in the same language as the url
-        # TODO: Might want not to drop videos that does not match requested language
-        # but to process those formats with lower precedence
-        formats = filter(_match_lang, all_formats)
-        formats = list(formats)  # in python3 filter returns an iterator
-        if not formats:
-            # Some videos are only available in the 'Originalversion'
-            # they aren't tagged as being in French or German
-            # Sometimes there are neither videos of requested lang code
-            # nor original version videos available
-            # For such cases we just take all_formats as is
-            formats = all_formats
-            if not formats:
-                raise ExtractorError('The formats list is empty')
+            f = dict(format_dict)
+            versionCode = f.get('versionCode')
 
-        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
-            def sort_key(f):
-                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
-        else:
-            def sort_key(f):
-                versionCode = f.get('versionCode')
-                if versionCode is None:
-                    versionCode = ''
-                return (
-                    # Sort first by quality
-                    int(f.get('height', -1)),
-                    int(f.get('bitrate', -1)),
-                    # The original version with subtitles has lower relevance
-                    re.match(r'VO-ST(F|A)', versionCode) is None,
-                    # The version with sourds/mal subtitles has also lower relevance
-                    re.match(r'VO?(F|A)-STM\1', versionCode) is None,
-                    # Prefer http downloads over m3u8
-                    0 if f['url'].endswith('m3u8') else 1,
-                )
-        formats = sorted(formats, key=sort_key)
-        def _format(format_info):
-            info = {
-                'format_id': format_info['format_id'],
-                'format_note': '%s, %s' % (format_info.get('versionCode'), format_info.get('versionLibelle')),
-                'width': int_or_none(format_info.get('width')),
-                'height': int_or_none(format_info.get('height')),
-                'tbr': int_or_none(format_info.get('bitrate')),
+            langcode = {
+                'fr': 'F',
+                'de': 'A',
+            }.get(lang, lang)
+            lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
+            lang_pref = (
+                None if versionCode is None else (
+                    10 if any(re.match(r, versionCode) for r in lang_rexs)
+                    else -10))
+            source_pref = 0
+            if versionCode is not None:
+                # The original version with subtitles has lower relevance
+                if re.match(r'VO-ST(F|A)', versionCode):
+                    source_pref -= 10
+                # The version with sourds/mal subtitles has also lower relevance
+                elif re.match(r'VO?(F|A)-STM\1', versionCode):
+                    source_pref -= 9
+            format = {
+                'format_id': format_id,
+                'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
+                'language_preference': lang_pref,
+                'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'tbr': int_or_none(f.get('bitrate')),
+                'quality': qfunc(f['quality']),
+                'source_preference': source_pref,
             }
-            if format_info['mediaType'] == 'rtmp':
-                info['url'] = format_info['streamer']
-                info['play_path'] = 'mp4:' + format_info['url']
-                info['ext'] = 'flv'
-            else:
-                info['url'] = format_info['url']
-                info['ext'] = determine_ext(info['url'])
-            return info
-        info_dict['formats'] = [_format(f) for f in formats]
 
+            if f.get('mediaType') == 'rtmp':
+                format['url'] = f['streamer']
+                format['play_path'] = 'mp4:' + f['url']
+                format['ext'] = 'flv'
+            else:
+                format['url'] = f['url']
+
+            formats.append(format)
+
+        self._sort_formats(formats)
+
+        info_dict['formats'] = formats
         return info_dict
 
 
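The arte.tv rewrite drops the hand-rolled sort_key and language filter in favour of per-format preference fields consumed by _sort_formats. The net effect can be imitated with a plain sort on the three fields set here; the real comparator considers more attributes, so this is only a sketch with invented sample formats:

    formats = [
        {'format_id': 'HQ-VA', 'language_preference': -10,
         'quality': 3, 'source_preference': 0},
        {'format_id': 'SQ-VO-STF', 'language_preference': 10,
         'quality': 0, 'source_preference': -10},
        {'format_id': 'HQ-VF', 'language_preference': 10,
         'quality': 3, 'source_preference': 0},
    ]
    # worst first, best last: wrong-language formats sink despite quality
    formats.sort(key=lambda f: (
        f['language_preference'], f['quality'], f['source_preference']))
    print([f['format_id'] for f in formats])
    # ['HQ-VA', 'SQ-VO-STF', 'HQ-VF']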
@@ -12,17 +12,17 @@ class AudiomackIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
     IE_NAME = 'audiomack'
     _TESTS = [
-        #hosted on audiomack
+        # hosted on audiomack
         {
             'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
             'info_dict':
             {
-                'id' : 'roosh-williams/extraordinary',
+                'id': 'roosh-williams/extraordinary',
                 'ext': 'mp3',
                 'title': 'Roosh Williams - Extraordinary'
             }
         },
-        #hosted on soundcloud via audiomack
+        # hosted on soundcloud via audiomack
         {
             'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
             'file': '172419696.mp3',
@@ -49,7 +49,7 @@ class AudiomackIE(InfoExtractor):
             raise ExtractorError("Unable to deduce api url of song")
         realurl = api_response["url"]
 
-        #Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
         # - if so, pass the work off to the soundcloud extractor
         if SoundcloudIE.suitable(realurl):
             return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}

@@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
     _TEST = {
         'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
         'info_dict': {
             'id': '4050584',
             'ext': 'flv',
@@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
+                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)['result']
 
@@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
                 '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                 '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
+            ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)

@@ -83,12 +83,12 @@ class BandcampIE(InfoExtractor):
         initial_url = mp3_info['url']
         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
         m_url = re.match(re_url, initial_url)
-        #We build the url we will use to get the final track url
+        # We build the url we will use to get the final track url
         # This url is build in Bandcamp in the script download_bunde_*.js
         request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
         final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
         # If we could correctly generate the .rand field the url would be
-        #in the "download_url" key
+        # in the "download_url" key
         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
         return {

@@ -165,10 +165,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
         webpage = self._download_webpage(url, group_id, 'Downloading video page')
         if re.search(r'id="emp-error" class="notinuk">', webpage):
             raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only',
-                expected=True)
+                                 expected=True)
 
         playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
-            'Downloading playlist XML')
+                                      'Downloading playlist XML')
 
         no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
         if no_items is not None:
@@ -195,7 +195,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             duration = int(item.get('duration'))
 
             media_selection = self._download_xml(
-                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s'  % programme_id,
+                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                 programme_id, 'Downloading media selection XML')
 
             for media in self._extract_medias(media_selection):
@@ -220,4 +220,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             'duration': duration,
             'formats': formats,
             'subtitles': subtitles,
-        }
\ No newline at end of file
+        }

@@ -40,7 +40,7 @@ class BeegIE(InfoExtractor):
 
         title = self._html_search_regex(
             r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
-        
+
         description = self._html_search_regex(
             r'<meta name="description" content="([^"]*)"',
             webpage, 'description', fatal=False)

@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 from .common import InfoExtractor

@@ -71,11 +71,12 @@ class BlipTVIE(SubtitlesInfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         lookup_id = mobj.group('lookup_id')
 
-        # See https://github.com/rg3/youtube-dl/issues/857
+        # See https://github.com/rg3/youtube-dl/issues/857 and
+        # https://github.com/rg3/youtube-dl/issues/4197
         if lookup_id:
             info_page = self._download_webpage(
                 'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
-            video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
+            video_id = self._search_regex(r'config\.id\s*=\s*"([0-9]+)', info_page, 'video_id')
         else:
             video_id = mobj.group('id')
 
@@ -165,9 +166,17 @@ class BlipTVIE(SubtitlesInfoExtractor):
 
 
 class BlipTVUserIE(InfoExtractor):
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
+    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
     _PAGE_SIZE = 12
     IE_NAME = 'blip.tv:user'
+    _TEST = {
+        'url': 'http://blip.tv/actone',
+        'info_dict': {
+            'id': 'actone',
+            'title': 'Act One: The Series',
+        },
+        'playlist_count': 5,
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -178,6 +187,7 @@ class BlipTVUserIE(InfoExtractor):
         page = self._download_webpage(url, username, 'Downloading user page')
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)
+        title = self._og_search_title(page)
 
         # Download video ids using BlipTV Ajax calls. Result size per
         # query is limited (currently to 12 videos) so we need to query
@@ -214,4 +224,5 @@ class BlipTVUserIE(InfoExtractor):
 
         urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
-        return [self.playlist_result(url_entries, playlist_title=username)]
+        return self.playlist_result(
+            url_entries, playlist_title=title, playlist_id=username)

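The last hunk also fixes the return type: _real_extract used to wrap the playlist in a list, and now returns the playlist_result dict directly, with the page title as the playlist title and the username as its id. A standalone sketch of what that helper produces (it mirrors InfoExtractor.playlist_result as of this commit range; the entry is invented):

def playlist_result(entries, playlist_id=None, playlist_title=None):
    # Wrap a list of entry dicts in a '_type': 'playlist' result
    video_info = {'_type': 'playlist', 'entries': entries}
    if playlist_id:
        video_info['id'] = playlist_id
    if playlist_title:
        video_info['title'] = playlist_title
    return video_info

entries = [{'_type': 'url', 'url': 'http://blip.tv/some-video', 'ie_key': 'BlipTV'}]
print(playlist_result(entries, playlist_id='actone', playlist_title='Act One: The Series'))
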
37	youtube_dl/extractor/bpb.py	(new file)
@@ -0,0 +1,37 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class BpbIE(InfoExtractor):
+    IE_DESC = 'Bundeszentrale für politische Bildung'
+    _VALID_URL = r'http://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'
+
+    _TEST = {
+        'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
+        'md5': '0792086e8e2bfbac9cdf27835d5f2093',
+        'info_dict': {
+            'id': '297',
+            'ext': 'mp4',
+            'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
+            'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.'
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<h2 class="white">(.*?)</h2>', webpage, 'title')
+        video_url = self._html_search_regex(
+            r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': self._og_search_description(webpage),
+        }

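A short usage sketch for the new extractor through youtube-dl's public API (assumes a checkout containing this commit; fetches metadata only, no download):

import youtube_dl

ydl = youtube_dl.YoutubeDL({'skip_download': True})
info = ydl.extract_info(
    'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
    download=False)
print(info['id'], info['ext'], info['title'])  # 297 mp4 Joachim Gauck zu ...
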
@@ -111,6 +111,8 @@ class BrightcoveIE(InfoExtractor):
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
         object_str = object_str.replace('<--', '<!--')
+        # remove namespace to simplify extraction
+        object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
         object_str = fix_xml_ampersands(object_str)
 
         object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
@@ -219,7 +221,7 @@ class BrightcoveIE(InfoExtractor):
         webpage = self._download_webpage(req, video_id)
 
         error_msg = self._html_search_regex(
-            r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage,
+            r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
             'error message', default=None)
         if error_msg is not None:
             raise ExtractorError(

@@ -112,4 +112,4 @@ class CanalplusIE(InfoExtractor):
             'like_count': int(infos.find('NB_LIKES').text),
             'comment_count': int(infos.find('NB_COMMENTS').text),
             'formats': formats,
-        }
\ No newline at end of file
+        }

@@ -45,4 +45,4 @@ class CBSIE(InfoExtractor):
         real_id = self._search_regex(
             r"video\.settings\.pid\s*=\s*'([^']+)';",
             webpage, 'real video ID')
-        return self.url_result(u'theplatform:%s' % real_id)
+        return self.url_result('theplatform:%s' % real_id)

@@ -84,4 +84,4 @@ class CBSNewsIE(InfoExtractor):
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
-        }
\ No newline at end of file
+        }

@@ -92,7 +92,7 @@ class CeskaTelevizeIE(InfoExtractor):
         req.add_header('Referer', url)
 
         playlist = self._download_xml(req, video_id)
-        
+
         formats = []
         for i in playlist.find('smilRoot/body'):
             if 'AD' not in i.attrib['id']:

@@ -5,6 +5,7 @@ import re
 from .common import InfoExtractor
 from ..utils import ExtractorError
 
+
 class Channel9IE(InfoExtractor):
     '''
     Common extractor for channel9.msdn.com.
@@ -31,7 +32,7 @@ class Channel9IE(InfoExtractor):
                 'session_code': 'KOS002',
                 'session_day': 'Day 1',
                 'session_room': 'Arena 1A',
-                'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
+                'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
             },
         },
         {
@@ -44,7 +45,7 @@ class Channel9IE(InfoExtractor):
                 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                 'duration': 1540,
                 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-                'authors': [ 'Mike Wilmot' ],
+                'authors': ['Mike Wilmot'],
             },
         }
     ]
@@ -83,7 +84,7 @@ class Channel9IE(InfoExtractor):
             'format_id': x.group('quality'),
             'format_note': x.group('note'),
             'format': '%s (%s)' % (x.group('quality'), x.group('note')),
-            'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
             'preference': self._known_formats.index(x.group('quality')),
             'vcodec': 'none' if x.group('note') == 'Audio only' else None,
         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@@ -187,32 +188,33 @@ class Channel9IE(InfoExtractor):
         view_count = self._extract_view_count(html)
         comment_count = self._extract_comment_count(html)
 
-        common = {'_type': 'video',
-                  'id': content_path,
-                  'description': description,
-                  'thumbnail': thumbnail,
-                  'duration': duration,
-                  'avg_rating': avg_rating,
-                  'rating_count': rating_count,
-                  'view_count': view_count,
-                  'comment_count': comment_count,
-                }
+        common = {
+            '_type': 'video',
+            'id': content_path,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'avg_rating': avg_rating,
+            'rating_count': rating_count,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
 
         result = []
 
         if slides is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Slides', 'url': slides })
+            d.update({'title': title + '-Slides', 'url': slides})
             result.append(d)
 
         if zip_ is not None:
             d = common.copy()
-            d.update({ 'title': title + '-Zip', 'url': zip_ })
+            d.update({'title': title + '-Zip', 'url': zip_})
             result.append(d)
 
         if len(formats) > 0:
             d = common.copy()
-            d.update({ 'title': title, 'formats': formats })
+            d.update({'title': title, 'formats': formats})
             result.append(d)
 
         return result
@@ -270,5 +272,5 @@ class Channel9IE(InfoExtractor):
             else:
                 raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
 
-        else: # Assuming list
+        else:  # Assuming list
             return self._extract_list(content_path)

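The rewritten block above is a common youtube-dl pattern: build one dict of metadata shared by every asset of a session, then emit per-asset entries by copy-and-update. A standalone sketch (titles and URLs invented for illustration):

common = {'_type': 'video', 'id': 'events/build/2013/KOS002', 'duration': 1540}

result = []
for suffix, url in [('-Slides', 'http://example.com/slides.pptx'),
                    ('-Zip', 'http://example.com/source.zip')]:
    d = common.copy()  # shallow copy so the shared metadata is not mutated
    d.update({'title': 'Keynote' + suffix, 'url': url})
    result.append(d)

print([e['title'] for e in result])  # ['Keynote-Slides', 'Keynote-Zip']
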
@@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/')+1]
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:

@@ -24,7 +24,7 @@ class ClipfishIE(InfoExtractor):
             'title': 'FIFA 14 - E3 2013 Trailer',
             'duration': 82,
         },
-        u'skip': 'Blocked in the US'
+        'skip': 'Blocked in the US'
     }
 
     def _real_extract(self, url):
@@ -34,7 +34,7 @@ class ClipfishIE(InfoExtractor):
         info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                     (video_id, int(time.time())))
         doc = self._download_xml(
-            info_url, video_id, note=u'Downloading info page')
+            info_url, video_id, note='Downloading info page')
         title = doc.find('title').text
         video_url = doc.find('filename').text
         if video_url is None:

@@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor):
             transform_source=fix_xml_ampersands)
 
         track_doc = pdoc.find('trackList/track')
+
         def find_param(name):
             node = find_xpath_attr(track_doc, './/param', 'name', name)
             if node is not None:

@@ -25,8 +25,7 @@ class CNNIE(InfoExtractor):
             'duration': 135,
             'upload_date': '20130609',
         },
-    },
-    {
+    }, {
         "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
         "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
         "info_dict": {

@@ -10,47 +10,46 @@ from ..utils import int_or_none
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
-    _TESTS = [{
-        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
-        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
-        'info_dict': {
-            'id': '6902724',
-            'ext': 'mp4',
-            'title': 'Comic-Con Cosplay Catastrophe',
-            'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
-            'age_limit': 13,
-            'duration': 187,
+    _TESTS = [
+        {
+            'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
+            'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
+            'info_dict': {
+                'id': '6902724',
+                'ext': 'mp4',
+                'title': 'Comic-Con Cosplay Catastrophe',
+                'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.",
+                'age_limit': 13,
+                'duration': 187,
+            },
+        }, {
+            'url': 'http://www.collegehumor.com/video/3505939/font-conference',
+            'md5': '72fa701d8ef38664a4dbb9e2ab721816',
+            'info_dict': {
+                'id': '3505939',
+                'ext': 'mp4',
+                'title': 'Font Conference',
+                'description': "This video wasn't long enough, so we made it double-spaced.",
+                'age_limit': 10,
+                'duration': 179,
+            },
+        }, {
+            # embedded youtube video
+            'url': 'http://www.collegehumor.com/embed/6950306',
+            'info_dict': {
+                'id': 'Z-bao9fg6Yc',
+                'ext': 'mp4',
+                'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
+                'uploader': 'Mark Dice',
+                'uploader_id': 'MarkDice',
+                'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
+                'upload_date': '20140127',
+            },
+            'params': {
+                'skip_download': True,
+            },
+            'add_ie': ['Youtube'],
+        },
-    },
-    {
-        'url': 'http://www.collegehumor.com/video/3505939/font-conference',
-        'md5': '72fa701d8ef38664a4dbb9e2ab721816',
-        'info_dict': {
-            'id': '3505939',
-            'ext': 'mp4',
-            'title': 'Font Conference',
-            'description': "This video wasn't long enough, so we made it double-spaced.",
-            'age_limit': 10,
-            'duration': 179,
-        },
-    },
-    # embedded youtube video
-    {
-        'url': 'http://www.collegehumor.com/embed/6950306',
-        'info_dict': {
-            'id': 'Z-bao9fg6Yc',
-            'ext': 'mp4',
-            'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
-            'uploader': 'Mark Dice',
-            'uploader_id': 'MarkDice',
-            'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
-            'upload_date': '20140127',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'add_ie': ['Youtube'],
-    },
     ]
 
     def _real_extract(self, url):

@@ -2,7 +2,6 @@ from __future__ import unicode_literals
 
 import re
 
-from .common import InfoExtractor
 from .mtv import MTVServicesInfoExtractor
 from ..utils import (
     compat_str,
@@ -110,9 +109,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
+        mobj = re.match(self._VALID_URL, url)
 
         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):

@@ -43,7 +43,11 @@ class InfoExtractor(object):
     information possibly downloading the video to the file system, among
     other possible outcomes.
 
-    The dictionaries must include the following fields:
+    The type field determines the type of the result.
+    By far the most common value (and the default if _type is missing) is
+    "video", which indicates a single video.
+
+    For a video, the dictionaries must include the following fields:
 
     id:             Video identifier.
     title:          Video title, unescaped.
@@ -87,6 +91,11 @@ class InfoExtractor(object):
                                  by this field, regardless of all other values.
                                  -1 for default (order by other properties),
                                  -2 or smaller for less than default.
+                    * language_preference  Is this in the correct requested
+                                 language?
+                                 10 if it's what the URL is about,
+                                 -1 for default (don't know),
+                                 -10 otherwise, other values reserved for now.
                     * quality    Order number of the video quality of this
                                  format, irrespective of the file format.
                                  -1 for default (order by other properties),
@@ -146,6 +155,38 @@ class InfoExtractor(object):
 
     Unless mentioned otherwise, None is equivalent to absence of information.
 
+
+    _type "playlist" indicates multiple videos.
+    There must be a key "entries", which is a list or a PagedList object, each
+    element of which is a valid dictionary under this specification.
+
+    Additionally, playlists can have "title" and "id" attributes with the same
+    semantics as videos (see above).
+
+
+    _type "multi_video" indicates that there are multiple videos that
+    form a single show, for example multiple acts of an opera or TV episode.
+    It must have an entries key like a playlist and contain all the keys
+    required for a video at the same time.
+
+
+    _type "url" indicates that the video must be extracted from another
+    location, possibly by a different extractor. Its only required key is:
+    "url" - the next URL to extract.
+
+    Additionally, it may have properties believed to be identical to the
+    resolved entity, for example "title" if the title of the referred video is
+    known ahead of time.
+
+
+    _type "url_transparent" entities have the same specification as "url", but
+    indicate that the given additional information is more precise than the one
+    associated with the resolved URL.
+    This is useful when a site employs a video service that hosts the video and
+    its technical metadata, but that video service does not embed a useful
+    title, description etc.
+
+
     Subclasses of this one should re-define the _real_initialize() and
     _real_extract() methods and define a _VALID_URL regexp.
     Probably, they should also be added to the list of extractors.
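Before the next hunk, a minimal sketch of what results of each _type look like in practice, following the docstring just added (the ids, titles and URLs are invented):

# Illustrative only: field names follow the docstring above.
single_video = {
    '_type': 'video',  # may be omitted, since "video" is the default
    'id': 'abc123',
    'title': 'Some title',
    'url': 'http://example.com/video.mp4',
}

delegated = {
    '_type': 'url',  # hand off to whichever extractor matches this URL
    'url': 'http://example.com/watch/abc123',
}

playlist = {
    '_type': 'playlist',
    'id': 'some-user',
    'title': 'Uploads of some-user',
    'entries': [single_video, delegated],
}
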
@@ -255,9 +296,11 @@ class InfoExtractor(object):
         content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
         return (content, urlh)
 
-    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True):
+    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None):
         content_type = urlh.headers.get('Content-Type', '')
         webpage_bytes = urlh.read()
+        if prefix is not None:
+            webpage_bytes = prefix + webpage_bytes
         m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
         if m:
             encoding = m.group(1)
@@ -382,17 +425,18 @@ class InfoExtractor(object):
         """Report attempt to log in."""
         self.to_screen('Logging in')
 
-    #Methods for following #608
+    # Methods for following #608
     @staticmethod
     def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
+        # TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
                       'url': url,
                       'ie_key': ie}
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
 
     @staticmethod
     def playlist_result(entries, playlist_id=None, playlist_title=None):
         """Returns a playlist"""

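A quick demonstration of the helper touched above (runnable against a youtube_dl checkout; the URL and id are invented):

from youtube_dl.extractor.common import InfoExtractor

res = InfoExtractor.url_result('http://blip.tv/actone/some-episode',
                               ie='BlipTV', video_id='12345')
print(res)  # {'_type': 'url', 'url': '...', 'ie_key': 'BlipTV', 'id': '12345'}
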
@@ -436,7 +480,7 @@ class InfoExtractor(object):
             raise RegexNotFoundError('Unable to extract %s' % _name)
         else:
             self._downloader.report_warning('unable to extract %s; '
-                'please report this issue on http://yt-dl.org/bug' % _name)
+                                            'please report this issue on http://yt-dl.org/bug' % _name)
             return None
 
     def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
@@ -476,7 +520,7 @@ class InfoExtractor(object):
                     raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
             except (IOError, netrc.NetrcParseError) as err:
                 self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
-        
+
         return (username, password)
 
     def _get_tfa_info(self):
@@ -570,7 +614,7 @@ class InfoExtractor(object):
 
     def _twitter_search_player(self, html):
         return self._html_search_meta('twitter:player', html,
-            'twitter card player')
+                                      'twitter card player')
 
     def _sort_formats(self, formats):
         if not formats:
@@ -615,6 +659,7 @@ class InfoExtractor(object):
 
             return (
                 preference,
+                f.get('language_preference') if f.get('language_preference') is not None else -1,
                 f.get('quality') if f.get('quality') is not None else -1,
                 f.get('height') if f.get('height') is not None else -1,
                 f.get('width') if f.get('width') is not None else -1,

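A standalone sketch of what the new language_preference key does to the sort tuple above: formats sort ascending (worst first, best last), with -1 standing in for "unknown" (the format dicts are invented):

formats = [
    {'format_id': 'en-hd', 'language_preference': 10, 'quality': 1},
    {'format_id': 'ru-hd', 'language_preference': -10, 'quality': 1},
    {'format_id': 'en-sd', 'language_preference': 10, 'quality': 0},
]
formats.sort(key=lambda f: (
    f.get('preference') if f.get('preference') is not None else -1,
    f.get('language_preference') if f.get('language_preference') is not None else -1,
    f.get('quality') if f.get('quality') is not None else -1,
))
# Wrong-language formats now sort below all correct-language ones:
print([f['format_id'] for f in formats])  # ['ru-hd', 'en-sd', 'en-hd']
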
@@ -54,7 +54,7 @@ class CrackedIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'url':video_url,
+            'url': video_url,
             'title': title,
             'description': description,
             'timestamp': timestamp,
@@ -62,4 +62,4 @@ class CrackedIE(InfoExtractor):
             'comment_count': comment_count,
             'height': height,
             'width': width,
-        }
\ No newline at end of file
+        }

@@ -69,11 +69,9 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(login_request, None, False, 'Wrong login info')
 
-
     def _real_initialize(self):
         self._login()
 
-
     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -99,8 +97,10 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
             return shaHash + [0] * 12
 
         key = obfuscate_key(id)
+
         class Counter:
             __value = iv
+
             def next_value(self):
                 temp = self.__value
                 self.__value = inc(self.__value)

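A standalone sketch of the stateful counter pattern above, which feeds successive IV blocks to the CTR-mode decryption; it assumes a youtube_dl checkout for the inc() helper this extractor already imports:

from youtube_dl.aes import inc  # increments a big-endian list of int bytes

class Counter(object):
    def __init__(self, iv):
        self.value = iv  # 16-byte block as a list of ints

    def next_value(self):
        # return the current counter block, then post-increment it
        temp = self.value
        self.value = inc(self.value)
        return temp

c = Counter([0] * 15 + [255])
print(c.next_value()[-2:])  # [0, 255]
print(c.next_value()[-2:])  # [1, 0] after the carry
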
@@ -183,7 +183,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
 
         return output
 
-    def _real_extract(self,url):
+    def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
 
@@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
             streamdata = self._download_xml(
@@ -248,8 +248,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage(
+                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -264,8 +265,6 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             if not lang_code:
                 continue
             sub_root = xml.etree.ElementTree.fromstring(subtitle)
-            if not sub_root:
-                subtitles[lang_code] = ''
             if sub_format == 'ass':
                 subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
             else:
@@ -276,14 +275,14 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             return
 
         return {
-            'id':          video_id,
-            'title':       video_title,
+            'id': video_id,
+            'title': video_title,
             'description': video_description,
-            'thumbnail':   video_thumbnail,
-            'uploader':    video_uploader,
+            'thumbnail': video_thumbnail,
+            'uploader': video_uploader,
             'upload_date': video_upload_date,
-            'subtitles':   subtitles,
-            'formats':     formats,
+            'subtitles': subtitles,
+            'formats': formats,
         }

@@ -1,4 +1,4 @@
-#coding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -18,6 +18,7 @@ from ..utils import (
     unescapeHTML,
 )
 
+
 class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
@@ -27,6 +28,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
         request.add_header('Cookie', 'ff=off')
         return request
 
+
 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""
 
@@ -112,7 +114,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
         embed_page = self._download_webpage(embed_url, video_id,
                                             'Downloading embed page')
         info = self._search_regex(r'var info = ({.*?}),$', embed_page,
-            'video info', flags=re.MULTILINE)
+                                  'video info', flags=re.MULTILINE)
         info = json.loads(info)
         if info.get('error') is not None:
             msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
@@ -206,7 +208,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
         return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
-                   for video_id in orderedSet(video_ids)]
+                for video_id in orderedSet(video_ids)]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)

@@ -9,7 +9,7 @@ from .common import InfoExtractor
 class DefenseGouvFrIE(InfoExtractor):
     IE_NAME = 'defense.gouv.fr'
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
-        r'ligthboxvideo/base-de-medias/webtv/(.*)')
+                  r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
         'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
@@ -26,13 +26,13 @@ class DefenseGouvFrIE(InfoExtractor):
         video_id = self._search_regex(
             r"flashvars.pvg_id=\"(\d+)\";",
             webpage, 'ID')
-        
+
         json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
-            + video_id)
+                    + video_id)
         info = self._download_webpage(json_url, title,
-                                                  'Downloading JSON config')
+                                      'Downloading JSON config')
         video_url = json.loads(info)['renditions'][0]['url']
-        
+
         return {'id': video_id,
                 'ext': 'mp4',
                 'url': video_url,

@@ -16,9 +16,9 @@ class DiscoveryIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'MythBusters: Mission Impossible Outtakes',
             'description': ('Watch Jamie Hyneman and Adam Savage practice being'
-                ' each other -- to the point of confusing Jamie\'s dog -- and '
-                'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
-                ' back.'),
+                            ' each other -- to the point of confusing Jamie\'s dog -- and '
+                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
+                            ' back.'),
             'duration': 156,
         },
     }
@@ -29,7 +29,7 @@ class DiscoveryIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
-            webpage, 'video list', flags=re.DOTALL)
+                                             webpage, 'video list', flags=re.DOTALL)
         video_list = json.loads(video_list_json)
         info = video_list['clips'][0]
         formats = []

@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in miliseconds
 
         return {
             'id': video_id,

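The comment touched above is worth a one-line illustration: Dotsub's dateCreated is expressed in milliseconds, while time.gmtime() expects seconds (the timestamp value below is invented):

import time

date_created_ms = 1381985217000  # hypothetical API value
print(time.gmtime(date_created_ms / 1000).tm_year)  # 2013
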
@@ -11,18 +11,18 @@ from ..utils import url_basename
 
 class DropboxIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
-    _TESTS = [{
-        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
-        'info_dict': {
-            'id': 'nelirfsxnmcfbfh',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video \'ä"BaW_jenozKc'
-        }
-    },
-    {
-        'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
-        'only_matching': True,
-    },
+    _TESTS = [
+        {
+            'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
+            'info_dict': {
+                'id': 'nelirfsxnmcfbfh',
+                'ext': 'mp4',
+                'title': 'youtube-dl test video \'ä"BaW_jenozKc'
+            }
+        }, {
+            'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
+            'only_matching': True,
+        },
     ]
 
     def _real_extract(self, url):

@@ -28,7 +28,7 @@ class EHowIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
-            webpage, 'video URL')
+                                       webpage, 'video URL')
         final_url = compat_urllib_parse.unquote(video_url)
         uploader = self._html_search_meta('uploader', webpage)
         title = self._og_search_title(webpage).replace(' | eHow', '')

@@ -125,7 +125,7 @@ class EightTracksIE(InfoExtractor):
             info = {
                 'id': compat_str(track_data['id']),
                 'url': track_data['track_file_stream_url'],
-                'title': track_data['performer'] + u' - ' + track_data['name'],
+                'title': track_data['performer'] + ' - ' + track_data['name'],
                 'raw_title': track_data['name'],
                 'uploader_id': data['user']['login'],
                 'ext': 'm4a',

@@ -20,7 +20,7 @@ class EpornerIE(InfoExtractor):
             'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
             'ext': 'mp4',
             'title': 'Infamous Tiffany Teen Strip Tease Video',
-            'duration': 194,
+            'duration': 1838,
             'view_count': int,
             'age_limit': 18,
         }
@@ -57,9 +57,7 @@ class EpornerIE(InfoExtractor):
             formats.append(fmt)
         self._sort_formats(formats)
 
-        duration = parse_duration(self._search_regex(
-            r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
-            fatal=False))
+        duration = parse_duration(self._html_search_meta('duration', webpage))
         view_count = str_to_int(self._search_regex(
             r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
             webpage, 'view count', fatal=False))

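The corrected test value above (194 -> 1838) matches what the new code computes: the page's duration metadata holds a clock-style string that parse_duration() converts to seconds (runnable against a youtube_dl checkout):

from youtube_dl.utils import parse_duration

print(parse_duration('30:38'))  # 1838, the corrected test value
print(parse_duration('3:14'))   # 194, for comparison with the old value
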
@@ -60,8 +60,8 @@ class FacebookIE(InfoExtractor):
         login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
         login_page_req.add_header('Cookie', 'locale=en_US')
         login_page = self._download_webpage(login_page_req, None,
-            note='Downloading login page',
-            errnote='Unable to download login page')
+                                            note='Downloading login page',
+                                            errnote='Unable to download login page')
         lsd = self._search_regex(
             r'<input type="hidden" name="lsd" value="([^"]*)"',
             login_page, 'lsd')
@@ -77,12 +77,12 @@ class FacebookIE(InfoExtractor):
             'legacy_return': '1',
             'timezone': '-60',
             'trynum': '1',
-            }
+        }
         request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             login_results = self._download_webpage(request, None,
-                note='Logging in', errnote='unable to fetch login page')
+                                                   note='Logging in', errnote='unable to fetch login page')
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                 self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
                 return
@@ -96,7 +96,7 @@ class FacebookIE(InfoExtractor):
             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             check_response = self._download_webpage(check_req, None,
-                note='Confirming login')
+                                                    note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                 self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:

@@ -40,7 +40,7 @@ class FC2IE(InfoExtractor):
 
         info_url = (
             "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
-            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.','%2E')))
+            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
 
         info_webpage = self._download_webpage(
             info_url, video_id, note='Downloading info page')

@@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):
         duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
 
         like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'like count', fatal=False)
+                                             webpage, 'like count', fatal=False)
         dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
-            webpage, 'dislike count', fatal=False)
+                                                webpage, 'dislike count', fatal=False)
 
         return {
             'id': video_id,
@@ -57,4 +57,4 @@ class FirstTVIE(InfoExtractor):
             'duration': int_or_none(duration),
             'like_count': int_or_none(like_count),
             'dislike_count': int_or_none(dislike_count),
-        }
\ No newline at end of file
+        }

@@ -50,7 +50,7 @@ class FiveMinIE(InfoExtractor):
         video_id = mobj.group('id')
         embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
         embed_page = self._download_webpage(embed_url, video_id,
-            'Downloading embed page')
+                                            'Downloading embed page')
         sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
         query = compat_urllib_parse.urlencode({
             'func': 'GetResults',

@@ -32,9 +32,9 @@ class FKTVIE(InfoExtractor):
         server = random.randint(2, 4)
         video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
         start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
-            episode)
+                                               episode)
         playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
-            'playlist', flags=re.DOTALL)
+                                      'playlist', flags=re.DOTALL)
         files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
         # TODO: return a single multipart video
         videos = []

Some files were not shown because too many files have changed in this diff.