Compare commits
48 Commits
2013.02.01
...
2013.02.22
Author | SHA1 | Date | |
---|---|---|---|
![]() |
ea05129ebd | ||
![]() |
4be0aa3539 | ||
![]() |
f636c34481 | ||
![]() |
3bf79c752e | ||
![]() |
8271226a55 | ||
![]() |
1013186a17 | ||
![]() |
7c038b3c32 | ||
![]() |
c8cd8e5f55 | ||
![]() |
471cf47796 | ||
![]() |
d8f64574a4 | ||
![]() |
e711babbd1 | ||
![]() |
a72b0f2b6f | ||
![]() |
434eb6f26b | ||
![]() |
197080b10b | ||
![]() |
7796e8c2cb | ||
![]() |
6d4363368a | ||
![]() |
414638cd50 | ||
![]() |
2a9983b78f | ||
![]() |
b17c974a88 | ||
![]() |
5717d91ab7 | ||
![]() |
79eb0287ab | ||
![]() |
58994225bc | ||
![]() |
59d4c2fe1b | ||
![]() |
3a468f2d8b | ||
![]() |
1ad5d872b9 | ||
![]() |
355fc8e944 | ||
![]() |
380a29dbf7 | ||
![]() |
1528d6642d | ||
![]() |
7311fef854 | ||
![]() |
906417c7c5 | ||
![]() |
6aabe82035 | ||
![]() |
f0877a445e | ||
![]() |
da06e2daf8 | ||
![]() |
d3f5f9f6b9 | ||
![]() |
bfc6ea7935 | ||
![]() |
8edc2cf8ca | ||
![]() |
fb778e66df | ||
![]() |
3a9918d37f | ||
![]() |
ccb0cae134 | ||
![]() |
085c8b75a6 | ||
![]() |
dbf2ba3d61 | ||
![]() |
b47bbac393 | ||
![]() |
229cac754a | ||
![]() |
0e33684194 | ||
![]() |
9e982f9e4e | ||
![]() |
c7a725cfad | ||
![]() |
450a30cae8 | ||
![]() |
a32b573ccb |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -17,3 +17,4 @@ youtube-dl.tar.gz
|
|||||||
.coverage
|
.coverage
|
||||||
cover/
|
cover/
|
||||||
updates_key.pem
|
updates_key.pem
|
||||||
|
*.egg-info
|
||||||
|
@@ -1,3 +1,5 @@
|
|||||||
include README.md
|
include README.md
|
||||||
include test/*.py
|
include test/*.py
|
||||||
include test/*.json
|
include test/*.json
|
||||||
|
include youtube-dl.bash-completion
|
||||||
|
include youtube-dl.1
|
||||||
|
11
Makefile
11
Makefile
@@ -1,7 +1,10 @@
|
|||||||
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
|
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
|
rm -rf youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
|
||||||
|
|
||||||
|
cleanall: clean
|
||||||
|
rm -f youtube-dl youtube-dl.exe
|
||||||
|
|
||||||
PREFIX=/usr/local
|
PREFIX=/usr/local
|
||||||
BINDIR=$(PREFIX)/bin
|
BINDIR=$(PREFIX)/bin
|
||||||
@@ -23,7 +26,9 @@ test:
|
|||||||
|
|
||||||
tar: youtube-dl.tar.gz
|
tar: youtube-dl.tar.gz
|
||||||
|
|
||||||
.PHONY: all clean install test tar
|
.PHONY: all clean install test tar bash-completion pypi-files
|
||||||
|
|
||||||
|
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
|
||||||
|
|
||||||
youtube-dl: youtube_dl/*.py
|
youtube-dl: youtube_dl/*.py
|
||||||
zip --quiet youtube-dl youtube_dl/*.py
|
zip --quiet youtube-dl youtube_dl/*.py
|
||||||
@@ -45,6 +50,8 @@ youtube-dl.1: README.md
|
|||||||
youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
|
youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
|
||||||
python devscripts/bash-completion.py
|
python devscripts/bash-completion.py
|
||||||
|
|
||||||
|
bash-completion: youtube-dl.bash-completion
|
||||||
|
|
||||||
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
|
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
|
||||||
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
|
@@ -38,6 +38,10 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
--reject-title REGEX skip download for matching titles (regex or
|
--reject-title REGEX skip download for matching titles (regex or
|
||||||
caseless sub-string)
|
caseless sub-string)
|
||||||
--max-downloads NUMBER Abort after downloading NUMBER files
|
--max-downloads NUMBER Abort after downloading NUMBER files
|
||||||
|
--min-filesize SIZE Do not download any videos smaller than SIZE (e.g.
|
||||||
|
50k or 44.6m)
|
||||||
|
--max-filesize SIZE Do not download any videos larger than SIZE (e.g.
|
||||||
|
50k or 44.6m)
|
||||||
|
|
||||||
## Filesystem Options:
|
## Filesystem Options:
|
||||||
-t, --title use title in file name
|
-t, --title use title in file name
|
||||||
@@ -81,6 +85,7 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
--get-description simulate, quiet but print video description
|
--get-description simulate, quiet but print video description
|
||||||
--get-filename simulate, quiet but print output filename
|
--get-filename simulate, quiet but print output filename
|
||||||
--get-format simulate, quiet but print output format
|
--get-format simulate, quiet but print output format
|
||||||
|
--newline output progress bar as new lines
|
||||||
--no-progress do not print progress bar
|
--no-progress do not print progress bar
|
||||||
--console-title display progress in console titlebar
|
--console-title display progress in console titlebar
|
||||||
-v, --verbose print various debugging information
|
-v, --verbose print various debugging information
|
||||||
|
@@ -20,19 +20,19 @@ if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already pre
|
|||||||
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
|
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
|
||||||
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
|
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
|
||||||
|
|
||||||
echo "\n### First of all, testing..."
|
/bin/echo -e "\n### First of all, testing..."
|
||||||
make clean
|
make cleanall
|
||||||
nosetests --with-coverage --cover-package=youtube_dl --cover-html test || exit 1
|
nosetests --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
|
||||||
|
|
||||||
echo "\n### Changing version in version.py..."
|
/bin/echo -e "\n### Changing version in version.py..."
|
||||||
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
|
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
|
||||||
|
|
||||||
echo "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
|
/bin/echo -e "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
|
||||||
make README.md
|
make README.md
|
||||||
git add CHANGELOG README.md youtube_dl/version.py
|
git add CHANGELOG README.md youtube_dl/version.py
|
||||||
git commit -m "release $version"
|
git commit -m "release $version"
|
||||||
|
|
||||||
echo "\n### Now tagging, signing and pushing..."
|
/bin/echo -e "\n### Now tagging, signing and pushing..."
|
||||||
git tag -s -m "Release $version" "$version"
|
git tag -s -m "Release $version" "$version"
|
||||||
git show "$version"
|
git show "$version"
|
||||||
read -p "Is it good, can I push? (y/n) " -n 1
|
read -p "Is it good, can I push? (y/n) " -n 1
|
||||||
@@ -42,7 +42,7 @@ MASTER=$(git rev-parse --abbrev-ref HEAD)
|
|||||||
git push origin $MASTER:master
|
git push origin $MASTER:master
|
||||||
git push origin "$version"
|
git push origin "$version"
|
||||||
|
|
||||||
echo "\n### OK, now it is time to build the binaries..."
|
/bin/echo -e "\n### OK, now it is time to build the binaries..."
|
||||||
REV=$(git rev-parse HEAD)
|
REV=$(git rev-parse HEAD)
|
||||||
make youtube-dl youtube-dl.tar.gz
|
make youtube-dl youtube-dl.tar.gz
|
||||||
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
|
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
|
||||||
@@ -57,11 +57,11 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
|
|||||||
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
|
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
|
||||||
git checkout HEAD -- youtube-dl youtube-dl.exe
|
git checkout HEAD -- youtube-dl youtube-dl.exe
|
||||||
|
|
||||||
echo "\n### Signing and uploading the new binaries to youtube-dl.org..."
|
/bin/echo -e "\n### Signing and uploading the new binaries to youtube-dl.org..."
|
||||||
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
|
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
|
||||||
scp -r "build/$version" ytdl@youtube-dl.org:html/downloads/
|
scp -r "build/$version" ytdl@youtube-dl.org:html/downloads/
|
||||||
|
|
||||||
echo "\n### Now switching to gh-pages..."
|
/bin/echo -e "\n### Now switching to gh-pages..."
|
||||||
git clone --branch gh-pages --single-branch . build/gh-pages
|
git clone --branch gh-pages --single-branch . build/gh-pages
|
||||||
ROOT=$(pwd)
|
ROOT=$(pwd)
|
||||||
(
|
(
|
||||||
@@ -83,4 +83,9 @@ ROOT=$(pwd)
|
|||||||
)
|
)
|
||||||
rm -rf build
|
rm -rf build
|
||||||
|
|
||||||
echo "\n### DONE!"
|
make pypi-files
|
||||||
|
echo "Uploading to PyPi ..."
|
||||||
|
python setup.py sdist upload
|
||||||
|
make clean
|
||||||
|
|
||||||
|
/bin/echo -e "\n### DONE!"
|
||||||
|
6
setup.py
6
setup.py
@@ -2,10 +2,14 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
from distutils.core import setup
|
|
||||||
import pkg_resources
|
import pkg_resources
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
try:
|
||||||
|
from setuptools import setup
|
||||||
|
except ImportError:
|
||||||
|
from distutils.core import setup
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import py2exe
|
import py2exe
|
||||||
"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
|
"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
|
||||||
|
@@ -76,7 +76,8 @@
|
|||||||
"name": "StanfordOpenClassroom",
|
"name": "StanfordOpenClassroom",
|
||||||
"md5": "544a9468546059d4e80d76265b0443b8",
|
"md5": "544a9468546059d4e80d76265b0443b8",
|
||||||
"url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
|
"url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
|
||||||
"file": "PracticalUnix_intro-environment.mp4"
|
"file": "PracticalUnix_intro-environment.mp4",
|
||||||
|
"skip": "Currently offline"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "XNXX",
|
"name": "XNXX",
|
||||||
@@ -275,5 +276,33 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Keek",
|
||||||
|
"url": "http://www.keek.com/ytdl/keeks/NODfbab",
|
||||||
|
"file": "NODfbab.mp4",
|
||||||
|
"md5": "9b0636f8c0f7614afa4ea5e4c6e57e83",
|
||||||
|
"info_dict": {
|
||||||
|
"title": "test chars: \"'/\\ä<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de ."
|
||||||
|
}
|
||||||
|
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "TED",
|
||||||
|
"url": "http://www.ted.com/talks/dan_dennett_on_our_consciousness.html",
|
||||||
|
"file": "102.mp4",
|
||||||
|
"md5": "7bc087e71d16f18f9b8ab9fa62a8a031",
|
||||||
|
"info_dict": {
|
||||||
|
"title": "Dan Dennett: The illusion of consciousness"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "MySpass",
|
||||||
|
"url": "http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/",
|
||||||
|
"file": "11741.mp4",
|
||||||
|
"md5": "0b49f4844a068f8b33f4b7c88405862b",
|
||||||
|
"info_dict": {
|
||||||
|
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
BIN
youtube-dl
BIN
youtube-dl
Binary file not shown.
@@ -82,6 +82,8 @@ class FileDownloader(object):
|
|||||||
subtitleslang: Language of the subtitles to download
|
subtitleslang: Language of the subtitles to download
|
||||||
test: Download only first bytes to test the downloader.
|
test: Download only first bytes to test the downloader.
|
||||||
keepvideo: Keep the video file after post-processing
|
keepvideo: Keep the video file after post-processing
|
||||||
|
min_filesize: Skip files smaller than this size
|
||||||
|
max_filesize: Skip files larger than this size
|
||||||
"""
|
"""
|
||||||
|
|
||||||
params = None
|
params = None
|
||||||
@@ -206,7 +208,7 @@ class FileDownloader(object):
|
|||||||
# already of type unicode()
|
# already of type unicode()
|
||||||
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
|
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
|
||||||
elif 'TERM' in os.environ:
|
elif 'TERM' in os.environ:
|
||||||
sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
|
self.to_screen('\033]0;%s\007' % message, skip_eol=True)
|
||||||
|
|
||||||
def fixed_template(self):
|
def fixed_template(self):
|
||||||
"""Checks if the output template is fixed."""
|
"""Checks if the output template is fixed."""
|
||||||
@@ -303,6 +305,10 @@ class FileDownloader(object):
|
|||||||
"""Report download progress."""
|
"""Report download progress."""
|
||||||
if self.params.get('noprogress', False):
|
if self.params.get('noprogress', False):
|
||||||
return
|
return
|
||||||
|
if self.params.get('progress_with_newline', False):
|
||||||
|
self.to_screen(u'[download] %s of %s at %s ETA %s' %
|
||||||
|
(percent_str, data_len_str, speed_str, eta_str))
|
||||||
|
else:
|
||||||
self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
|
self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
|
||||||
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
|
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
|
||||||
self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
|
self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
|
||||||
@@ -364,12 +370,10 @@ class FileDownloader(object):
|
|||||||
title = info_dict['title']
|
title = info_dict['title']
|
||||||
matchtitle = self.params.get('matchtitle', False)
|
matchtitle = self.params.get('matchtitle', False)
|
||||||
if matchtitle:
|
if matchtitle:
|
||||||
matchtitle = matchtitle.decode('utf8')
|
|
||||||
if not re.search(matchtitle, title, re.IGNORECASE):
|
if not re.search(matchtitle, title, re.IGNORECASE):
|
||||||
return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
|
return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
|
||||||
rejecttitle = self.params.get('rejecttitle', False)
|
rejecttitle = self.params.get('rejecttitle', False)
|
||||||
if rejecttitle:
|
if rejecttitle:
|
||||||
rejecttitle = rejecttitle.decode('utf8')
|
|
||||||
if re.search(rejecttitle, title, re.IGNORECASE):
|
if re.search(rejecttitle, title, re.IGNORECASE):
|
||||||
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
|
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
|
||||||
return None
|
return None
|
||||||
@@ -712,6 +716,15 @@ class FileDownloader(object):
|
|||||||
data_len = data.info().get('Content-length', None)
|
data_len = data.info().get('Content-length', None)
|
||||||
if data_len is not None:
|
if data_len is not None:
|
||||||
data_len = int(data_len) + resume_len
|
data_len = int(data_len) + resume_len
|
||||||
|
min_data_len = self.params.get("min_filesize", None)
|
||||||
|
max_data_len = self.params.get("max_filesize", None)
|
||||||
|
if min_data_len is not None and data_len < min_data_len:
|
||||||
|
self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
|
||||||
|
return False
|
||||||
|
if max_data_len is not None and data_len > max_data_len:
|
||||||
|
self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
|
||||||
|
return False
|
||||||
|
|
||||||
data_len_str = self.format_bytes(data_len)
|
data_len_str = self.format_bytes(data_len)
|
||||||
byte_counter = 0 + resume_len
|
byte_counter = 0 + resume_len
|
||||||
block_size = self.params.get('buffersize', 1024)
|
block_size = self.params.get('buffersize', 1024)
|
||||||
|
@@ -151,7 +151,7 @@ class YoutubeIE(InfoExtractor):
|
|||||||
(?(1).+)? # if we found the ID, everything can follow
|
(?(1).+)? # if we found the ID, everything can follow
|
||||||
$"""
|
$"""
|
||||||
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
|
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
|
||||||
_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
|
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
|
||||||
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
|
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
|
||||||
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
|
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
|
||||||
_NETRC_MACHINE = 'youtube'
|
_NETRC_MACHINE = 'youtube'
|
||||||
@@ -264,13 +264,18 @@ class YoutubeIE(InfoExtractor):
|
|||||||
srt_lang = list(srt_lang_list.keys())[0]
|
srt_lang = list(srt_lang_list.keys())[0]
|
||||||
if not srt_lang in srt_lang_list:
|
if not srt_lang in srt_lang_list:
|
||||||
return (u'WARNING: no closed captions found in the specified language', None)
|
return (u'WARNING: no closed captions found in the specified language', None)
|
||||||
request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
|
params = compat_urllib_parse.urlencode({
|
||||||
|
'lang': srt_lang,
|
||||||
|
'name': srt_lang_list[srt_lang].encode('utf-8'),
|
||||||
|
'v': video_id,
|
||||||
|
})
|
||||||
|
url = 'http://www.youtube.com/api/timedtext?' + params
|
||||||
try:
|
try:
|
||||||
srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
|
srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
|
||||||
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
||||||
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
|
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
|
||||||
if not srt_xml:
|
if not srt_xml:
|
||||||
return (u'WARNING: unable to download video subtitles', None)
|
return (u'WARNING: Did not fetch video subtitles', None)
|
||||||
return (None, self._closed_captions_xml_to_srt(srt_xml))
|
return (None, self._closed_captions_xml_to_srt(srt_xml))
|
||||||
|
|
||||||
def _print_formats(self, formats):
|
def _print_formats(self, formats):
|
||||||
@@ -315,19 +320,54 @@ class YoutubeIE(InfoExtractor):
|
|||||||
if username is None:
|
if username is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
request = compat_urllib_request.Request(self._LOGIN_URL)
|
||||||
|
try:
|
||||||
|
login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
|
||||||
|
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
||||||
|
self._downloader.to_stderr(u'WARNING: unable to fetch login page: %s' % compat_str(err))
|
||||||
|
return
|
||||||
|
|
||||||
|
galx = None
|
||||||
|
dsh = None
|
||||||
|
match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
|
||||||
|
if match:
|
||||||
|
galx = match.group(1)
|
||||||
|
|
||||||
|
match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
|
||||||
|
if match:
|
||||||
|
dsh = match.group(1)
|
||||||
|
|
||||||
# Log in
|
# Log in
|
||||||
login_form = {
|
login_form_strs = {
|
||||||
'current_form': 'loginForm',
|
u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
|
||||||
'next': '/',
|
u'Email': username,
|
||||||
'action_login': 'Log In',
|
u'GALX': galx,
|
||||||
'username': username,
|
u'Passwd': password,
|
||||||
'password': password,
|
u'PersistentCookie': u'yes',
|
||||||
|
u'_utf8': u'霱',
|
||||||
|
u'bgresponse': u'js_disabled',
|
||||||
|
u'checkConnection': u'',
|
||||||
|
u'checkedDomains': u'youtube',
|
||||||
|
u'dnConn': u'',
|
||||||
|
u'dsh': dsh,
|
||||||
|
u'pstMsg': u'0',
|
||||||
|
u'rmShown': u'1',
|
||||||
|
u'secTok': u'',
|
||||||
|
u'signIn': u'Sign in',
|
||||||
|
u'timeStmp': u'',
|
||||||
|
u'service': u'youtube',
|
||||||
|
u'uilel': u'3',
|
||||||
|
u'hl': u'en_US',
|
||||||
}
|
}
|
||||||
request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
|
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
|
||||||
|
# chokes on unicode
|
||||||
|
login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
|
||||||
|
login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
|
||||||
|
request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
|
||||||
try:
|
try:
|
||||||
self.report_login()
|
self.report_login()
|
||||||
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
|
login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
|
||||||
if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
|
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
|
||||||
self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
|
self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
|
||||||
return
|
return
|
||||||
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
||||||
@@ -678,6 +718,7 @@ class DailymotionIE(InfoExtractor):
|
|||||||
|
|
||||||
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
|
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
|
||||||
IE_NAME = u'dailymotion'
|
IE_NAME = u'dailymotion'
|
||||||
|
_WORKING = False
|
||||||
|
|
||||||
def __init__(self, downloader=None):
|
def __init__(self, downloader=None):
|
||||||
InfoExtractor.__init__(self, downloader)
|
InfoExtractor.__init__(self, downloader)
|
||||||
@@ -973,7 +1014,7 @@ class VimeoIE(InfoExtractor):
|
|||||||
"""Information extractor for vimeo.com."""
|
"""Information extractor for vimeo.com."""
|
||||||
|
|
||||||
# _VALID_URL matches Vimeo URLs
|
# _VALID_URL matches Vimeo URLs
|
||||||
_VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
|
_VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
|
||||||
IE_NAME = u'vimeo'
|
IE_NAME = u'vimeo'
|
||||||
|
|
||||||
def __init__(self, downloader=None):
|
def __init__(self, downloader=None):
|
||||||
@@ -994,7 +1035,11 @@ class VimeoIE(InfoExtractor):
|
|||||||
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
|
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
|
||||||
return
|
return
|
||||||
|
|
||||||
video_id = mobj.group(1)
|
video_id = mobj.group('id')
|
||||||
|
if not mobj.group('proto'):
|
||||||
|
url = 'https://' + url
|
||||||
|
if mobj.group('direct_link'):
|
||||||
|
url = 'https://vimeo.com/' + video_id
|
||||||
|
|
||||||
# Retrieve video webpage to extract further information
|
# Retrieve video webpage to extract further information
|
||||||
request = compat_urllib_request.Request(url, None, std_headers)
|
request = compat_urllib_request.Request(url, None, std_headers)
|
||||||
@@ -1285,7 +1330,7 @@ class GenericIE(InfoExtractor):
|
|||||||
opener = compat_urllib_request.OpenerDirector()
|
opener = compat_urllib_request.OpenerDirector()
|
||||||
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
|
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
|
||||||
HTTPMethodFallback, HEADRedirectHandler,
|
HTTPMethodFallback, HEADRedirectHandler,
|
||||||
compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
|
compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
|
||||||
opener.add_handler(handler())
|
opener.add_handler(handler())
|
||||||
|
|
||||||
response = opener.open(HeadRequest(url))
|
response = opener.open(HeadRequest(url))
|
||||||
@@ -1321,6 +1366,9 @@ class GenericIE(InfoExtractor):
|
|||||||
if mobj is None:
|
if mobj is None:
|
||||||
# Broaden the search a little bit
|
# Broaden the search a little bit
|
||||||
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
|
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
|
||||||
|
if mobj is None:
|
||||||
|
# Broaden the search a little bit: JWPlayer JS loader
|
||||||
|
mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
|
||||||
if mobj is None:
|
if mobj is None:
|
||||||
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
|
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
|
||||||
return
|
return
|
||||||
@@ -2053,6 +2101,10 @@ class FacebookIE(InfoExtractor):
|
|||||||
params_raw = compat_urllib_parse.unquote(data['params'])
|
params_raw = compat_urllib_parse.unquote(data['params'])
|
||||||
params = json.loads(params_raw)
|
params = json.loads(params_raw)
|
||||||
video_url = params['hd_src']
|
video_url = params['hd_src']
|
||||||
|
if not video_url:
|
||||||
|
video_url = params['sd_src']
|
||||||
|
if not video_url:
|
||||||
|
raise ExtractorError(u'Cannot find video URL')
|
||||||
video_duration = int(params['video_duration'])
|
video_duration = int(params['video_duration'])
|
||||||
|
|
||||||
m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
|
m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
|
||||||
@@ -2188,7 +2240,7 @@ class MyVideoIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(webpage_url, video_id)
|
webpage = self._download_webpage(webpage_url, video_id)
|
||||||
|
|
||||||
self.report_extraction(video_id)
|
self.report_extraction(video_id)
|
||||||
mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
|
mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
|
||||||
webpage)
|
webpage)
|
||||||
if mobj is None:
|
if mobj is None:
|
||||||
self._downloader.trouble(u'ERROR: unable to extract media URL')
|
self._downloader.trouble(u'ERROR: unable to extract media URL')
|
||||||
@@ -3680,13 +3732,13 @@ class YouPornIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(req, video_id)
|
webpage = self._download_webpage(req, video_id)
|
||||||
|
|
||||||
# Get the video title
|
# Get the video title
|
||||||
result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
|
result = re.search(r'<h1.*?>(?P<title>.*)</h1>', webpage)
|
||||||
if result is None:
|
if result is None:
|
||||||
raise ExtractorError(u'ERROR: unable to extract video title')
|
raise ExtractorError(u'Unable to extract video title')
|
||||||
video_title = result.group('title').strip()
|
video_title = result.group('title').strip()
|
||||||
|
|
||||||
# Get the video date
|
# Get the video date
|
||||||
result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
|
result = re.search(r'Date:</label>(?P<date>.*) </li>', webpage)
|
||||||
if result is None:
|
if result is None:
|
||||||
self._downloader.to_stderr(u'WARNING: unable to extract video date')
|
self._downloader.to_stderr(u'WARNING: unable to extract video date')
|
||||||
upload_date = None
|
upload_date = None
|
||||||
@@ -3694,9 +3746,9 @@ class YouPornIE(InfoExtractor):
|
|||||||
upload_date = result.group('date').strip()
|
upload_date = result.group('date').strip()
|
||||||
|
|
||||||
# Get the video uploader
|
# Get the video uploader
|
||||||
result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
|
result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
|
||||||
if result is None:
|
if result is None:
|
||||||
self._downloader.to_stderr(u'ERROR: unable to extract uploader')
|
self._downloader.to_stderr(u'WARNING: unable to extract uploader')
|
||||||
video_uploader = None
|
video_uploader = None
|
||||||
else:
|
else:
|
||||||
video_uploader = result.group('uploader').strip()
|
video_uploader = result.group('uploader').strip()
|
||||||
@@ -3868,7 +3920,7 @@ class EightTracksIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, playlist_id)
|
webpage = self._download_webpage(url, playlist_id)
|
||||||
|
|
||||||
m = re.search(r"new TRAX.Mix\((.*?)\);\n*\s*TRAX.initSearchAutocomplete\('#search'\);", webpage, flags=re.DOTALL)
|
m = re.search(r"PAGE.mix = (.*?);\n", webpage, flags=re.DOTALL)
|
||||||
if not m:
|
if not m:
|
||||||
raise ExtractorError(u'Cannot find trax information')
|
raise ExtractorError(u'Cannot find trax information')
|
||||||
json_like = m.group(1)
|
json_like = m.group(1)
|
||||||
@@ -3900,6 +3952,158 @@ class EightTracksIE(InfoExtractor):
|
|||||||
next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
|
next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
|
||||||
return res
|
return res
|
||||||
|
|
||||||
|
class KeekIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
|
||||||
|
IE_NAME = u'keek'
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
m = re.match(self._VALID_URL, url)
|
||||||
|
video_id = m.group('videoID')
|
||||||
|
video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
|
||||||
|
thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
|
||||||
|
title = unescapeHTML(m.group('title'))
|
||||||
|
m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
|
||||||
|
uploader = unescapeHTML(m.group('uploader'))
|
||||||
|
info = {
|
||||||
|
'id':video_id,
|
||||||
|
'url':video_url,
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': title,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'uploader': uploader
|
||||||
|
}
|
||||||
|
return [info]
|
||||||
|
|
||||||
|
class TEDIE(InfoExtractor):
    """Information extractor for TED talks and playlists (ted.com)."""

    # NOTE: this pattern is written in verbose mode, so every match against
    # it must pass re.VERBOSE.
    _VALID_URL = r'''http://www.ted.com/
        (
            ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
            |
            ((?P<type_talk>talks)) # We have a simple talk
        )
        /(?P<name>\w+) # Here goes the name and then ".html"
        '''

    def suitable(self, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(self._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        """Dispatch to talk or playlist extraction depending on the URL."""
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj.group('type_talk'):
            # A single talk page.
            return [self._talk_info(url)]
        # Otherwise this is a playlist of talks.
        playlist_id = mobj.group('playlist_id')
        name = mobj.group('name')
        self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME, playlist_id, name))
        return self._playlist_videos_info(url, name, playlist_id)

    def _talk_video_link(self, media_slug):
        """Return the direct download URL for the given media slug."""
        return 'http://download.ted.com/talks/%s.mp4' % media_slug

    def _playlist_videos_info(self, url, name, playlist_id=0):
        """Return a list of info dicts, one per talk in the playlist."""
        video_RE = r'''
                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
                     ([.\s]*?)data-playlist_item_id="(\d+)"
                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
                     '''
        video_name_RE = r'<p\ class="talk-title"><a href="/talks/(.+).html">(?P<fullname>.+?)</a></p>'
        webpage = self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
        # Metadata entries and title entries appear in the same document
        # order, so pair them up positionally.
        videos = re.finditer(video_RE, webpage, re.VERBOSE)
        names = re.finditer(video_name_RE, webpage)
        entries = []
        for video_match, name_match in zip(videos, names):
            entries.append({
                'id': video_match.group('video_id'),
                'url': self._talk_video_link(video_match.group('mediaSlug')),
                'ext': 'mp4',
                'title': name_match.group('fullname'),
            })
        return entries

    def _talk_info(self, url, video_id=0):
        """Return the video for the talk in the url"""
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        videoName = mobj.group('name')
        webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
        # If the url includes the language we get the title translated
        title_RE = r'<h1><span id="altHeadline" >(?P<title>[\s\w:/\.\?=\+-\\\']*)</span></h1>'
        title = re.search(title_RE, webpage).group('title')
        # The talk's numeric id and media slug live in an inline script blob.
        info_RE = r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
                    "id":(?P<videoID>[\d]+).*?
                    "mediaSlug":"(?P<mediaSlug>[\w\d]+?)"'''
        info_match = re.search(info_RE, webpage, re.VERBOSE)
        video_id = info_match.group('videoID')
        media_slug = info_match.group('mediaSlug')
        return {
            'id': video_id,
            'url': self._talk_video_link(media_slug),
            'ext': 'mp4',
            'title': title,
        }
class MySpassIE(InfoExtractor):
    """Information extractor for myspass.de videos.

    The video id is taken from the last URL path element; the metadata
    (download URL, title, format, description, thumbnail) comes from the
    site's XML metadata endpoint.
    """
    _VALID_URL = r'http://www.myspass.de/.*'

    def _real_extract(self, url):
        META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'

        # video id is the last path element of the URL
        # usually there is a trailing slash, so also try the second but last
        url_path = compat_urllib_parse_urlparse(url).path
        url_parent_path, video_id = os.path.split(url_path)
        if not video_id:
            _, video_id = os.path.split(url_parent_path)

        # get metadata
        metadata_url = META_DATA_URL_TEMPLATE % video_id
        metadata_text = self._download_webpage(metadata_url, video_id)
        metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))

        # extract values from metadata
        url_flv_el = metadata.find('url_flv')
        if url_flv_el is None:
            self._downloader.trouble(u'ERROR: unable to extract download url')
            return
        video_url = url_flv_el.text
        extension = os.path.splitext(video_url)[1][1:]
        title_el = metadata.find('title')
        if title_el is None:
            self._downloader.trouble(u'ERROR: unable to extract title')
            return
        title = title_el.text
        format_id_el = metadata.find('format_id')
        if format_id_el is None:
            # Fall back to the file extension derived from the download URL.
            # (Fixes a NameError: the original referenced an undefined name
            # `ext` here instead of `extension`.)
            format = extension
        else:
            format = format_id_el.text
        description_el = metadata.find('description')
        if description_el is not None:
            description = description_el.text
        else:
            description = None
        imagePreview_el = metadata.find('imagePreview')
        if imagePreview_el is not None:
            thumbnail = imagePreview_el.text
        else:
            thumbnail = None
        info = {
            'id': video_id,
            'url': video_url,
            'title': title,
            'ext': extension,
            'format': format,
            'thumbnail': thumbnail,
            'description': description
        }
        return [info]
def gen_extractors():
|
def gen_extractors():
|
||||||
""" Return a list of an instance of every supported extractor.
|
""" Return a list of an instance of every supported extractor.
|
||||||
The order does matter; the first extractor matched is the one handling the URL.
|
The order does matter; the first extractor matched is the one handling the URL.
|
||||||
@@ -3946,6 +4150,9 @@ def gen_extractors():
|
|||||||
UstreamIE(),
|
UstreamIE(),
|
||||||
RBMARadioIE(),
|
RBMARadioIE(),
|
||||||
EightTracksIE(),
|
EightTracksIE(),
|
||||||
|
KeekIE(),
|
||||||
|
TEDIE(),
|
||||||
|
MySpassIE(),
|
||||||
GenericIE()
|
GenericIE()
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@@ -143,10 +143,10 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
|
|||||||
|
|
||||||
more_opts = []
|
more_opts = []
|
||||||
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
|
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
|
||||||
if self._preferredcodec == 'm4a' and filecodec == 'aac':
|
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
|
||||||
# Lossless, but in another container
|
# Lossless, but in another container
|
||||||
acodec = 'copy'
|
acodec = 'copy'
|
||||||
extension = self._preferredcodec
|
extension = 'm4a'
|
||||||
more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
|
more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
|
||||||
elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
|
elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
|
||||||
# Lossless if possible
|
# Lossless if possible
|
||||||
|
@@ -23,6 +23,7 @@ __authors__ = (
|
|||||||
'Dave Vasilevsky',
|
'Dave Vasilevsky',
|
||||||
'Jaime Marquínez Ferrándiz',
|
'Jaime Marquínez Ferrándiz',
|
||||||
'Jeff Crouse',
|
'Jeff Crouse',
|
||||||
|
'Osama Khalid',
|
||||||
)
|
)
|
||||||
|
|
||||||
__license__ = 'Public Domain'
|
__license__ = 'Public Domain'
|
||||||
@@ -150,6 +151,9 @@ def parseOpts():
|
|||||||
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
|
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
|
||||||
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
|
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
|
||||||
selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
|
selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
|
||||||
|
selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
|
||||||
|
selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
|
||||||
|
|
||||||
|
|
||||||
authentication.add_option('-u', '--username',
|
authentication.add_option('-u', '--username',
|
||||||
dest='username', metavar='USERNAME', help='account username')
|
dest='username', metavar='USERNAME', help='account username')
|
||||||
@@ -198,6 +202,8 @@ def parseOpts():
|
|||||||
verbosity.add_option('--get-format',
|
verbosity.add_option('--get-format',
|
||||||
action='store_true', dest='getformat',
|
action='store_true', dest='getformat',
|
||||||
help='simulate, quiet but print output format', default=False)
|
help='simulate, quiet but print output format', default=False)
|
||||||
|
verbosity.add_option('--newline',
|
||||||
|
action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
|
||||||
verbosity.add_option('--no-progress',
|
verbosity.add_option('--no-progress',
|
||||||
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
|
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
|
||||||
verbosity.add_option('--console-title',
|
verbosity.add_option('--console-title',
|
||||||
@@ -206,7 +212,6 @@ def parseOpts():
|
|||||||
verbosity.add_option('-v', '--verbose',
|
verbosity.add_option('-v', '--verbose',
|
||||||
action='store_true', dest='verbose', help='print various debugging information', default=False)
|
action='store_true', dest='verbose', help='print various debugging information', default=False)
|
||||||
|
|
||||||
|
|
||||||
filesystem.add_option('-t', '--title',
|
filesystem.add_option('-t', '--title',
|
||||||
action='store_true', dest='usetitle', help='use title in file name', default=False)
|
action='store_true', dest='usetitle', help='use title in file name', default=False)
|
||||||
filesystem.add_option('--id',
|
filesystem.add_option('--id',
|
||||||
@@ -286,10 +291,13 @@ def _real_main():
|
|||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
|
jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
|
||||||
if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
|
if os.access(opts.cookiefile, os.R_OK):
|
||||||
jar.load()
|
jar.load()
|
||||||
except (IOError, OSError) as err:
|
except (IOError, OSError) as err:
|
||||||
sys.exit(u'ERROR: unable to open cookie file')
|
if opts.verbose:
|
||||||
|
traceback.print_exc()
|
||||||
|
sys.stderr.write(u'ERROR: unable to open cookie file\n')
|
||||||
|
sys.exit(101)
|
||||||
# Set user agent
|
# Set user agent
|
||||||
if opts.user_agent is not None:
|
if opts.user_agent is not None:
|
||||||
std_headers['User-Agent'] = opts.user_agent
|
std_headers['User-Agent'] = opts.user_agent
|
||||||
@@ -349,6 +357,16 @@ def _real_main():
|
|||||||
if numeric_limit is None:
|
if numeric_limit is None:
|
||||||
parser.error(u'invalid rate limit specified')
|
parser.error(u'invalid rate limit specified')
|
||||||
opts.ratelimit = numeric_limit
|
opts.ratelimit = numeric_limit
|
||||||
|
if opts.min_filesize is not None:
|
||||||
|
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
|
||||||
|
if numeric_limit is None:
|
||||||
|
parser.error(u'invalid min_filesize specified')
|
||||||
|
opts.min_filesize = numeric_limit
|
||||||
|
if opts.max_filesize is not None:
|
||||||
|
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
|
||||||
|
if numeric_limit is None:
|
||||||
|
parser.error(u'invalid max_filesize specified')
|
||||||
|
opts.max_filesize = numeric_limit
|
||||||
if opts.retries is not None:
|
if opts.retries is not None:
|
||||||
try:
|
try:
|
||||||
opts.retries = int(opts.retries)
|
opts.retries = int(opts.retries)
|
||||||
@@ -394,6 +412,7 @@ def _real_main():
|
|||||||
or (opts.useid and u'%(id)s.%(ext)s')
|
or (opts.useid and u'%(id)s.%(ext)s')
|
||||||
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
|
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
|
||||||
or u'%(id)s.%(ext)s')
|
or u'%(id)s.%(ext)s')
|
||||||
|
|
||||||
# File downloader
|
# File downloader
|
||||||
fd = FileDownloader({
|
fd = FileDownloader({
|
||||||
'usenetrc': opts.usenetrc,
|
'usenetrc': opts.usenetrc,
|
||||||
@@ -421,6 +440,7 @@ def _real_main():
|
|||||||
'noresizebuffer': opts.noresizebuffer,
|
'noresizebuffer': opts.noresizebuffer,
|
||||||
'continuedl': opts.continue_dl,
|
'continuedl': opts.continue_dl,
|
||||||
'noprogress': opts.noprogress,
|
'noprogress': opts.noprogress,
|
||||||
|
'progress_with_newline': opts.progress_with_newline,
|
||||||
'playliststart': opts.playliststart,
|
'playliststart': opts.playliststart,
|
||||||
'playlistend': opts.playlistend,
|
'playlistend': opts.playlistend,
|
||||||
'logtostderr': opts.outtmpl == '-',
|
'logtostderr': opts.outtmpl == '-',
|
||||||
@@ -431,13 +451,15 @@ def _real_main():
|
|||||||
'writeinfojson': opts.writeinfojson,
|
'writeinfojson': opts.writeinfojson,
|
||||||
'writesubtitles': opts.writesubtitles,
|
'writesubtitles': opts.writesubtitles,
|
||||||
'subtitleslang': opts.subtitleslang,
|
'subtitleslang': opts.subtitleslang,
|
||||||
'matchtitle': opts.matchtitle,
|
'matchtitle': decodeOption(opts.matchtitle),
|
||||||
'rejecttitle': opts.rejecttitle,
|
'rejecttitle': decodeOption(opts.rejecttitle),
|
||||||
'max_downloads': opts.max_downloads,
|
'max_downloads': opts.max_downloads,
|
||||||
'prefer_free_formats': opts.prefer_free_formats,
|
'prefer_free_formats': opts.prefer_free_formats,
|
||||||
'verbose': opts.verbose,
|
'verbose': opts.verbose,
|
||||||
'test': opts.test,
|
'test': opts.test,
|
||||||
'keepvideo': opts.keepvideo,
|
'keepvideo': opts.keepvideo,
|
||||||
|
'min_filesize': opts.min_filesize,
|
||||||
|
'max_filesize': opts.max_filesize
|
||||||
})
|
})
|
||||||
|
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
|
@@ -77,10 +77,8 @@ def update_self(to_screen, verbose, filename):
|
|||||||
|
|
||||||
to_screen(u'Updating to version ' + versions_info['latest'] + '...')
|
to_screen(u'Updating to version ' + versions_info['latest'] + '...')
|
||||||
version = versions_info['versions'][versions_info['latest']]
|
version = versions_info['versions'][versions_info['latest']]
|
||||||
if version.get('notes'):
|
|
||||||
to_screen(u'PLEASE NOTE:')
|
print_notes(version_info['versions'])
|
||||||
for note in version['notes']:
|
|
||||||
to_screen(note)
|
|
||||||
|
|
||||||
if not os.access(filename, os.W_OK):
|
if not os.access(filename, os.W_OK):
|
||||||
to_screen(u'ERROR: no write permissions on %s' % filename)
|
to_screen(u'ERROR: no write permissions on %s' % filename)
|
||||||
@@ -158,3 +156,13 @@ del "%s"
|
|||||||
return
|
return
|
||||||
|
|
||||||
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
|
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
|
||||||
|
|
||||||
|
def print_notes(versions, fromVersion=__version__):
|
||||||
|
notes = []
|
||||||
|
for v,vdata in sorted(versions.items()):
|
||||||
|
if v > fromVersion:
|
||||||
|
notes.extend(vdata.get('notes', []))
|
||||||
|
if notes:
|
||||||
|
to_screen(u'PLEASE NOTE:')
|
||||||
|
for note in notes:
|
||||||
|
to_screen(note)
|
||||||
|
@@ -420,6 +420,14 @@ def encodeFilename(s):
|
|||||||
encoding = 'utf-8'
|
encoding = 'utf-8'
|
||||||
return s.encode(encoding, 'ignore')
|
return s.encode(encoding, 'ignore')
|
||||||
|
|
||||||
|
def decodeOption(optval):
|
||||||
|
if optval is None:
|
||||||
|
return optval
|
||||||
|
if isinstance(optval, bytes):
|
||||||
|
optval = optval.decode(preferredencoding())
|
||||||
|
|
||||||
|
assert isinstance(optval, compat_str)
|
||||||
|
return optval
|
||||||
|
|
||||||
class ExtractorError(Exception):
|
class ExtractorError(Exception):
|
||||||
"""Error during info extraction."""
|
"""Error during info extraction."""
|
||||||
|
@@ -1,2 +1,2 @@
|
|||||||
|
|
||||||
__version__ = '2013.02.01'
|
__version__ = '2013.02.22'
|
||||||
|
Reference in New Issue
Block a user