Compare commits: 2014.08.21...2014.08.22 (29 commits)
Commits in this range (SHA1): 50b294aab8, 756b046f3e, 388ac0b18a, ad06434bd3, bd9820c937, deda8ac376, e05f693942, b27295d2ab, ace52c5713, e62e150f64, c44c0a775d, 5fcf2dbed0, 91dff03217, a200f4cee2, ea6e8d5454, 83d35817f5, 76beff70a8, 61882bf7c6, cab317a680, 73159f99cc, c15235cd07, 12c3ec3382, 55db73efdf, 1ce464aba9, 6994e70651, c3f0b12b0f, 27ace98f51, a00d73c8c8, 7e660ac113
README.md (93 changed lines)
@@ -311,10 +311,12 @@ The current default template is `%(title)s-%(id)s.%(ext)s`.

In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:

```bash
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4 # A simple file name
```

# VIDEO SELECTION
@@ -325,14 +327,16 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb

Examples:

```bash
# Download only the videos uploaded in the last 6 months
$ youtube-dl --dateafter now-6months

# Download only the videos uploaded on January 1, 1970
$ youtube-dl --date 19700101

$ # will only download the videos uploaded in the 200x decade
$ youtube-dl --dateafter 20000101 --datebefore 20091231
```

# FAQ
@@ -407,49 +411,48 @@ If you want to add support for a new site, you can follow this quick list (assum

2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:

```python
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10KiB of the video file',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
            # TODO more properties, either as:
            # * A value
            # * MD5 checksum; start the string with md5:
            # * A regular expression; start the string with re:
            # * Any Python type (for example int or float)
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # TODO more code goes here, for example ...
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')

        return {
            'id': video_id,
            'title': title,
            # TODO more properties (see youtube_dl/extractor/common.py)
        }
```

5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries (a minimal sketch follows this list). The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
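Step 6 above mentions switching from `_TEST` to `_TESTS` once an extractor carries more than one test case. As a rough illustration (not part of the diff; the second URL and its metadata are invented placeholders), the template's test section would become something like:

```python
# Hypothetical sketch only -- URLs, hashes and titles below are placeholder
# values, not taken from this diff.
from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    # With more than one test case, _TEST becomes _TESTS, a list of dicts;
    # they run as test_YourExtractor, test_YourExtractor_1, and so on.
    _TESTS = [{
        'url': 'http://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10KiB of the video file',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        'url': 'http://yourextractor.com/watch/43',
        'info_dict': {
            'id': '43',
            'ext': 'mp4',
            'title': 'Another video title',
        },
        'params': {
            # no md5 given: only check metadata, do not download the file
            'skip_download': True,
        },
    }]
```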
youtube_dl/__init__.py

@@ -70,6 +70,7 @@ __authors__ = (
    'David Fabijan',
    'Sebastian Haas',
    'Alexander Kirk',
    'Erik Johnson',
)

__license__ = 'Public Domain'
youtube_dl/extractor/__init__.py

@@ -69,6 +69,7 @@ from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .drtv import DRTVIE
from .dump import DumpIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE

@@ -239,6 +240,7 @@ from .orf import (
    ORFFM4IE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .playvid import PlayvidIE
youtube_dl/extractor/aparat.py

@@ -1,5 +1,7 @@
#coding: utf-8

from __future__ import unicode_literals

import re

from .common import InfoExtractor

@@ -13,13 +15,14 @@ class AparatIE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'

    _TEST = {
-        u'url': u'http://www.aparat.com/v/wP8On',
-        u'file': u'wP8On.mp4',
-        u'md5': u'6714e0af7e0d875c5a39c4dc4ab46ad1',
-        u'info_dict': {
-            u"title": u"تیم گلکسی 11 - زومیت",
+        'url': 'http://www.aparat.com/v/wP8On',
+        'md5': '6714e0af7e0d875c5a39c4dc4ab46ad1',
+        'info_dict': {
+            'id': 'wP8On',
+            'ext': 'mp4',
+            'title': 'تیم گلکسی 11 - زومیت',
        },
-        #u'skip': u'Extremely unreliable',
+        # 'skip': 'Extremely unreliable',
    }

    def _real_extract(self, url):

@@ -29,8 +32,8 @@ class AparatIE(InfoExtractor):
        # Note: There is an easier-to-parse configuration at
        # http://www.aparat.com/video/video/config/videohash/%video_id
        # but the URL in there does not work
-        embed_url = (u'http://www.aparat.com/video/video/embed/videohash/' +
-            video_id + u'/vt/frame')
+        embed_url = ('http://www.aparat.com/video/video/embed/videohash/' +
+            video_id + '/vt/frame')
        webpage = self._download_webpage(embed_url, video_id)

        video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage)
youtube_dl/extractor/dump.py (new file, 39 lines)
@@ -0,0 +1,39 @@
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class DumpIE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'

    _TEST = {
        'url': 'http://www.dump.com/oneus/',
        'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
        'info_dict': {
            'id': 'oneus',
            'ext': 'flv',
            'title': "He's one of us.",
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')

        thumb = self._og_search_thumbnail(webpage)
        title = self._search_regex(r'<b>([^"]+)</b>', webpage, 'title')

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumb,
        }
youtube_dl/extractor/metacafe.py

@@ -9,6 +9,7 @@ from ..utils import (
    compat_urllib_request,
    determine_ext,
    ExtractorError,
    int_or_none,
)
@@ -83,6 +84,21 @@ class MetacafeIE(InfoExtractor):
                'skip_download': True,
            },
        },
        # Movieclips.com video
        {
            'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/',
            'info_dict': {
                'id': 'mv-Wy7ZU',
                'ext': 'mp4',
                'title': 'My Week with Marilyn - Do You Love Me?',
                'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.',
                'uploader': 'movie_trailers',
                'duration': 176,
            },
            'params': {
                'skip_download': 'requires rtmpdump',
            }
        }
    ]

    def report_disclaimer(self):
@@ -134,6 +150,7 @@ class MetacafeIE(InfoExtractor):

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        video_url = None
        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
        if mobj is not None:
            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
@@ -146,16 +163,17 @@ class MetacafeIE(InfoExtractor):
            else:
                gdaKey = mobj.group(1)
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-        else:
+        if video_url is None:
            mobj = re.search(r'<video src="([^"]+)"', webpage)
            if mobj:
                video_url = mobj.group(1)
                video_ext = 'mp4'
-            else:
-                mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
-                if mobj is None:
-                    raise ExtractorError('Unable to extract media URL')
-                vardict = compat_parse_qs(mobj.group(1))
+        if video_url is None:
+            flashvars = self._search_regex(
+                r' name="flashvars" value="(.*?)"', webpage, 'flashvars',
+                default=None)
+            if flashvars:
+                vardict = compat_parse_qs(flashvars)
            if 'mediaData' not in vardict:
                raise ExtractorError('Unable to extract media URL')
            mobj = re.search(
@@ -165,26 +183,68 @@ class MetacafeIE(InfoExtractor):
            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
            video_ext = determine_ext(video_url)

+        if video_url is None:
+            player_url = self._search_regex(
+                r"swfobject\.embedSWF\('([^']+)'",
+                webpage, 'config URL', default=None)
+            if player_url:
+                config_url = self._search_regex(
+                    r'config=(.+)$', player_url, 'config URL')
+                config_doc = self._download_xml(
+                    config_url, video_id,
+                    note='Downloading video config')
+                smil_url = config_doc.find('.//properties').attrib['smil_file']
+                smil_doc = self._download_xml(
+                    smil_url, video_id,
+                    note='Downloading SMIL document')
+                base_url = smil_doc.find('./head/meta').attrib['base']
+                video_url = []
+                for vn in smil_doc.findall('.//video'):
+                    br = int(vn.attrib['system-bitrate'])
+                    play_path = vn.attrib['src']
+                    video_url.append({
+                        'format_id': 'smil-%d' % br,
+                        'url': base_url,
+                        'play_path': play_path,
+                        'page_url': url,
+                        'player_url': player_url,
+                        'ext': play_path.partition(':')[0],
+                    })
+
+        if video_url is None:
+            raise ExtractorError('Unsupported video type')

-        video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, 'title')
+        video_title = self._html_search_regex(
+            r'(?im)<title>(.*) - Video</title>', webpage, 'title')
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        video_uploader = self._html_search_regex(
            r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
            webpage, 'uploader nickname', fatal=False)
+        duration = int_or_none(
+            self._html_search_meta('video:duration', webpage))

-        if re.search(r'"contentRating":"restricted"', webpage) is not None:
-            age_limit = 18
-        else:
-            age_limit = 0
+        age_limit = (
+            18
+            if re.search(r'"contentRating":"restricted"', webpage)
+            else 0)
+
+        if isinstance(video_url, list):
+            formats = video_url
+        else:
+            formats = [{
+                'url': video_url,
+                'ext': video_ext,
+            }]
+
+        self._sort_formats(formats)

        return {
            'id': video_id,
-            'url': video_url,
            'description': description,
            'uploader': video_uploader,
            'title': video_title,
-            'thumbnail':thumbnail,
-            'ext': video_ext,
+            'thumbnail': thumbnail,
            'age_limit': age_limit,
+            'formats': formats,
+            'duration': duration,
        }
youtube_dl/extractor/patreon.py (new file, 101 lines)
@@ -0,0 +1,101 @@
# encoding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_urlparse,
    js_to_json,
)


class PatreonIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(.+)'
    _TESTS = [
        {
            'url': 'http://www.patreon.com/creation?hid=743933',
            'md5': 'e25505eec1053a6e6813b8ed369875cc',
            'info_dict': {
                'id': '743933',
                'ext': 'mp3',
                'title': 'Episode 166: David Smalley of Dogma Debate',
                'uploader': 'Cognitive Dissonance Podcast',
                'thumbnail': 're:^https?://.*$',
            },
        },
        {
            'url': 'http://www.patreon.com/creation?hid=754133',
            'md5': '3eb09345bf44bf60451b8b0b81759d0a',
            'info_dict': {
                'id': '754133',
                'ext': 'mp3',
                'title': 'CD 167 Extra',
                'uploader': 'Cognitive Dissonance Podcast',
                'thumbnail': 're:^https?://.*$',
            },
        },
    ]

    # Currently Patreon exposes download URL via hidden CSS, so login is not
    # needed. Keeping this commented for when this inevitably changes.
    '''
    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'redirectUrl': 'http://www.patreon.com/',
            'email': username,
            'password': password,
        }

        request = compat_urllib_request.Request(
            'https://www.patreon.com/processLogin',
            compat_urllib_parse.urlencode(login_form).encode('utf-8')
        )
        login_page = self._download_webpage(request, None, note='Logging in as %s' % username)

        if re.search(r'onLoginFailed', login_page):
            raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)

    def _real_initialize(self):
        self._login()
    '''

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)

        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage).strip()

        attach_fn = self._html_search_regex(
            r'<div class="attach"><a target="_blank" href="([^"]+)">',
            webpage, 'attachment URL', default=None)
        if attach_fn is not None:
            video_url = 'http://www.patreon.com' + attach_fn
            thumbnail = self._og_search_thumbnail(webpage)
            uploader = self._html_search_regex(
                r'<strong>(.*?)</strong> is creating', webpage, 'uploader')
        else:
            playlist_js = self._search_regex(
                r'(?s)new\s+jPlayerPlaylist\(\s*\{\s*[^}]*},\s*(\[.*?,?\s*\])',
                webpage, 'playlist JSON')
            playlist_json = js_to_json(playlist_js)
            playlist = json.loads(playlist_json)
            data = playlist[0]
            video_url = self._proto_relative_url(data['mp3'])
            thumbnail = self._proto_relative_url(data.get('cover'))
            uploader = data.get('artist')

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp3',
            'title': title,
            'uploader': uploader,
            'thumbnail': thumbnail,
        }
youtube_dl/extractor/pbs.py

@@ -54,6 +54,18 @@ class PBSIE(InfoExtractor):
                'duration': 801,
            },
        },
        {
            'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/',
            'md5': 'c62859342be2a0358d6c9eb306595978',
            'info_dict': {
                'id': '2365297708',
                'ext': 'mp4',
                'description': 'md5:68d87ef760660eb564455eb30ca464fe',
                'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
                'duration': 6559,
                'thumbnail': 're:^https?://.*\.jpg$',
            }
        }
    ]

    def _extract_ids(self, url):

@@ -75,7 +87,7 @@ class PBSIE(InfoExtractor):
            return media_id, presumptive_id

        url = self._search_regex(
-            r'<iframe\s+id=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
+            r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
            webpage, 'player URL')
        mobj = re.match(self._VALID_URL, url)
youtube_dl/extractor/youtube.py

@@ -225,7 +225,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash webm audio
-        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 48, 'preference': -50},
+        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # RTMP (unnamed)

@@ -508,6 +508,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
        sub_lang_list = {}
        for l in lang_list:
            lang = l[1]
+            if lang in sub_lang_list:
+                continue
            params = compat_urllib_parse.urlencode({
                'lang': lang,
                'v': video_id,
youtube_dl/utils.py

@@ -233,18 +233,24 @@ else:
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically """

+    args = {
+        'suffix': '.tmp',
+        'prefix': os.path.basename(fn) + '.',
+        'dir': os.path.dirname(fn),
+        'delete': False,
+    }
+
    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
-        mode = 'wb'
-        encoding = None
+        args['mode'] = 'wb'
    else:
-        mode = 'w'
-        encoding = 'utf-8'
-    tf = tempfile.NamedTemporaryFile(
-        suffix='.tmp', prefix=os.path.basename(fn) + '.',
-        dir=os.path.dirname(fn),
-        delete=False)
+        args.update({
+            'mode': 'w',
+            'encoding': 'utf-8',
+        })
+
+    tf = tempfile.NamedTemporaryFile(**args)

    try:
        with tf:
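As a small aside, here is a usage sketch of this helper (the call below is illustrative, not part of the diff). The keyword arguments are collected in a dict so that `encoding` is only passed to `tempfile.NamedTemporaryFile` on the Python 3 branch, and the staging happens in a `.tmp` file created next to the target:

```python
# Hypothetical usage sketch of write_json_file (call not part of the diff).
from youtube_dl.utils import write_json_file

info = {'id': 'BaW_jenozKc', 'title': 'youtube-dl test video'}
# Per its docstring the write is atomic: the JSON is first staged in a '.tmp'
# temporary file created in the same directory as the target file.
write_json_file(info, 'BaW_jenozKc.info.json')
```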
@@ -1468,6 +1474,34 @@ def strip_jsonp(code):
    return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)


def js_to_json(code):
    def fix_kv(m):
        key = m.group(2)
        if key.startswith("'"):
            assert key.endswith("'")
            assert '"' not in key
            key = '"%s"' % key[1:-1]
        elif not key.startswith('"'):
            key = '"%s"' % key

        value = m.group(4)
        if value.startswith("'"):
            assert value.endswith("'")
            assert '"' not in value
            value = '"%s"' % value[1:-1]

        return m.group(1) + key + m.group(3) + value

    res = re.sub(r'''(?x)
        ([{,]\s*)
        ("[^"]*"|\'[^\']*\'|[a-z0-9A-Z]+)
        (:\s*)
        ([0-9.]+|true|false|"[^"]*"|\'[^\']*\'|\[|\{)
        ''', fix_kv, code)
    res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
    return res


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
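To make the intent of the new `js_to_json` helper concrete, here is a hypothetical round trip (the input string is invented for illustration). It double-quotes bare and single-quoted object keys and values and drops a trailing comma before `]`, so the result can be handed to `json.loads` — which is how the new Patreon extractor above consumes the `jPlayerPlaylist` literal it scrapes:

```python
# Hypothetical illustration of js_to_json (example input invented, not from the diff).
import json

from youtube_dl.utils import js_to_json

js = "{foo: 1, 'bar': 'baz', ids: [743933, 754133,]}"
print(js_to_json(js))
# -> {"foo": 1, "bar": "baz", "ids": [743933, 754133]}
print(json.loads(js_to_json(js))['bar'])
# -> baz
```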
youtube_dl/version.py

@@ -1,2 +1,2 @@
-__version__ = '2014.08.21.2'
+__version__ = '2014.08.22.2'