Compare commits


56 Commits

Author SHA1 Message Date
Philipp Hagemeister
a6e0afa2bb release 2015.10.18 2015-10-18 19:23:40 +02:00
Yen Chi Hsuan
4285a47f40 Merge pull request #7208 from yan12125/letv-fix
[Letv] Fix extraction
2015-10-18 22:32:10 +08:00
Sergey M․
e36963e0eb [eagleplatform] Identify hls formats 2015-10-18 20:24:33 +06:00
Sergey M․
dedd35c6bc [viewster] Fix failing m3u8 2015-10-18 19:59:18 +06:00
Sergey M․
608945d44a [canalc2] Fix test 2015-10-18 19:27:22 +06:00
Sergey M․
b1bf063503 [canalc2] Extract duration 2015-10-18 19:27:05 +06:00
Sergey M․
14bddf35fb [canalc2] Add ext 2015-10-18 19:23:52 +06:00
Sergey M․
ef6c868f23 [canalc2] Improve some regexes 2015-10-18 19:23:31 +06:00
Sergey M․
6682049dee [canalc2] Improve rtmp extraction 2015-10-18 19:19:43 +06:00
remitamine
b0f001a6cb [canalc2] fix info extraction 2015-10-18 19:06:53 +06:00
Jaime Marquínez Ferrándiz
dd67702a3e [imdb] Fix extraction (fixes #7220) 2015-10-18 14:13:06 +02:00
Yen Chi Hsuan
05a3879f1c [letv] Update M3U8's MIME type
The new MIME type appears in the following places:
https://www.iana.org/assignments/media-types/media-types.xhtml#application
https://hg.python.org/cpython/file/tip/Lib/mimetypes.py
2015-10-18 19:19:46 +08:00
Yen Chi Hsuan
4a7b790384 [twitter:card] Support YouTube embeds 2015-10-18 19:07:37 +08:00
Yen Chi Hsuan
09ff81316e Merge branch 'atomicdryad-pr-twitter' 2015-10-18 18:44:21 +08:00
Yen Chi Hsuan
c88aec845a [twitter] Fix short URL extraction 2015-10-18 18:23:56 +08:00
Yen Chi Hsuan
77a54b6a65 [twitter:card] Use _html_search_regex 2015-10-18 18:08:24 +08:00
Yen Chi Hsuan
575036b405 [twitter] Simplify and improve 2015-10-18 18:04:13 +08:00
Yen Chi Hsuan
f6dfd6603a [twitter] Use _html_search_regex 2015-10-18 17:18:01 +08:00
Yen Chi Hsuan
e04edad621 [twitter] Inherit from InfoExtractor directly 2015-10-18 17:16:57 +08:00
Yen Chi Hsuan
f322bfb063 [twitter:card] Remove unneeded 'ext' 2015-10-18 17:15:47 +08:00
Yen Chi Hsuan
014e880372 [twitter] Add IE_NAMEs 2015-10-18 17:13:58 +08:00
Yen Chi Hsuan
01d22d4703 [twitter] Use _download_xml 2015-10-18 17:11:55 +08:00
Yen Chi Hsuan
48aae2d2cf [twitter] Update tests 2015-10-18 17:07:48 +08:00
Yen Chi Hsuan
c571dea953 Merge branch 'pr-twitter' of https://github.com/atomicdryad/youtube-dl into atomicdryad-pr-twitter 2015-10-18 16:49:56 +08:00
Yen Chi Hsuan
8b172c2e10 [YoutubeDL] Use DataHandler 2015-10-18 13:44:22 +08:00
Yen Chi Hsuan
0a67a3632b [compat] Add compat_urllib_request_DataHandler 2015-10-18 13:44:21 +08:00
Yen Chi Hsuan
985e4fdc07 [downloader/hls] Add headers only for http(s) URLs
ffmpeg 2.8.1 raises an error with -headers and non-http input files.
2015-10-18 13:44:21 +08:00
Yen Chi Hsuan
1e399778ee [letv] Fix extraction
Using data URIs for passing the decrypted M3U8 manifest, which is
supported by ffmpeg only.
2015-10-18 13:42:57 +08:00
Sergey M․
2e022397c4 [vine] Add counters to tests 2015-10-18 09:36:19 +06:00
Sergey M․
02835c6bf4 [extractor/common] Document repost_count 2015-10-18 09:34:54 +06:00
Sergey M․
91816e8f16 [vine] Remove duplicate metadata, make more robust and modernize (Closes #7215) 2015-10-18 09:32:08 +06:00
Lukáš Lalinský
10c38c7ca2 [vine] Fix download tests 2015-10-18 09:20:54 +06:00
Lukáš Lalinský
94a773feb9 [vine] Use JS data to get title/alt_title 2015-10-18 09:20:46 +06:00
Sergey M․
448ef1f31c [extractor/common] Allow angle brackets in attributes in _og_regexes (#7215) 2015-10-18 09:11:02 +06:00
Sergey M․
49941c4e4f [crunchyroll] Add maturity wall reference tests (#7202) 2015-10-18 07:06:47 +06:00
Sergey M․
80f48920c8 [crunchyroll] Bypass maturity wall (Closes #7202) 2015-10-18 06:57:57 +06:00
Sergey M․
5a11b793fe [lynda] Extract all prioritized streams 2015-10-18 01:36:03 +06:00
Sergey M․
7593fbaa12 [dailymotion] Error spelling 2015-10-18 01:00:37 +06:00
Sergey M.
2eb0f72a0e Merge pull request #7212 from lalinsky/dailymotion-error
[dailymotion] Report errors from player v5
2015-10-18 00:54:27 +06:00
Sergey M․
8e5b121948 [test_youtube_lists] Add test flat playlist entries' titles 2015-10-18 00:27:06 +06:00
Sergey M․
648e6a1ffe [youtube] Generalize playlist entries extraction (Closes #6699, closes #6992) 2015-10-18 00:11:34 +06:00
Lukáš Lalinský
583882fdce [dailymotion] Report errors from player v5 2015-10-17 19:26:30 +02:00
Sergey M․
9eb31b265f [vidme] Add user-disabled test 2015-10-17 23:01:24 +06:00
Sergey M.
ddeb1b3de2 Merge pull request #7211 from lalinsky/vidme-suspended
[vidme] Better error message for suspended vidme videos
2015-10-17 22:56:51 +06:00
Lukáš Lalinský
59fe4824f8 [vidme] Better error message for suspended vidme videos 2015-10-17 18:52:25 +02:00
Sergey M․
dd8417526b [vimeo] Clarify new react+flux website fallback 2015-10-17 22:48:14 +06:00
Sergey M.
09670d5ba4 Merge pull request #7209 from lalinsky/vimeo-new-page
Extract config URL from (new?) React-based Vimeo's page
2015-10-17 22:39:17 +06:00
Lukáš Lalinský
41a7b00f18 [vimeo] Extract config URL from (new?) React-based Vimeo's page 2015-10-17 18:30:56 +02:00
Sergey M․
350c948133 [twitch:vod] Formatting 2015-10-17 18:43:12 +06:00
Sergey M․
e5e9966199 [twitch:vod] Improve extraction 2015-10-17 18:29:54 +06:00
Sergey M․
fbd9f6ea80 [twitch] Improve authentication 2015-10-17 18:28:21 +06:00
Jaime Marquínez Ferrándiz
6df7179e6c [rte] Actually recognize https urls
There was a missing 's' before the '?'.
2015-10-17 11:53:59 +02:00
Jaime Marquínez Ferrándiz
36eb802baf [rte] Replace expired test
According to their webpage it should be available until October 2035.
2015-10-17 11:49:51 +02:00
fnord
9e7e0dffd5 Actually add the extractor 2015-07-21 16:56:35 -05:00
fnord
c3dea3f878 Twittercard: support vmapurl method 2015-07-21 16:45:36 -05:00
fnord
f57f84f606 Twitter: get and describe video from status urls 2015-07-21 16:38:40 -05:00
25 changed files with 509 additions and 205 deletions

docs/supportedsites.md

@@ -588,7 +588,8 @@
- **twitch:stream**
- **twitch:video**
- **twitch:vod**
- **TwitterCard**
- **twitter**
- **twitter:card**
- **Ubu**
- **udemy**
- **udemy:course**

test/test_InfoExtractor.py

@@ -37,12 +37,16 @@ class TestInfoExtractor(unittest.TestCase):
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&amp;key2=val2'/>
<meta content='application/x-shockwave-flash' property='og:video:type'>
<meta content='Foo' property=og:foobar>
<meta name="og:test1" content='foo > < bar'/>
<meta name="og:test2" content="foo >//< bar"/>
'''
self.assertEqual(ie._og_search_title(html), 'Foo')
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
self.assertEqual(ie._og_search_video_url(html, default=None), None)
self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
def test_html_search_meta(self):
ie = self.ie

test/test_youtube_lists.py

@@ -57,5 +57,14 @@ class TestYoutubeLists(unittest.TestCase):
entries = result['entries']
self.assertEqual(len(entries), 100)
def test_youtube_flat_playlist_titles(self):
dl = FakeYDL()
dl.params['extract_flat'] = True
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertIsPlaylist(result)
for entry in result['entries']:
self.assertTrue(entry.get('title'))
if __name__ == '__main__':
unittest.main()
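
Note: the new test exercises the extract_flat path end to end. For reference, a minimal sketch of the same flat listing through the public API (the playlist URL is the one from the test above):

from youtube_dl import YoutubeDL

with YoutubeDL({'extract_flat': True, 'quiet': True}) as ydl:
    info = ydl.extract_info(
        'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        download=False)
    for entry in info['entries']:
        # with extract_flat, entries are lightweight url_result dicts;
        # after 648e6a1ffe they also carry the video title
        print(entry.get('title'))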

youtube_dl/YoutubeDL.py

@@ -37,6 +37,7 @@ from .compat import (
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .utils import (
ContentTooShortError,
@@ -1967,8 +1968,9 @@ class YoutubeDL(object):
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
data_handler = compat_urllib_request_DataHandler()
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh)
proxy_handler, https_handler, cookie_processor, ydlh, data_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play

youtube_dl/compat.py

@@ -1,7 +1,10 @@
from __future__ import unicode_literals
import binascii
import collections
import email
import getpass
import io
import optparse
import os
import re
@@ -38,6 +41,11 @@ try:
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
@@ -155,6 +163,40 @@ except ImportError: # Python 2
string = string.replace('+', ' ')
return compat_urllib_parse_unquote(string, encoding, errors)
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
def data_open(self, req):
# data URLs as specified in RFC 2397.
#
# ignores POSTed data
#
# syntax:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
url = req.get_full_url()
scheme, data = url.split(":", 1)
mediatype, data = data.split(",", 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
if mediatype.endswith(";base64"):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
mediatype = "text/plain;charset=US-ASCII"
headers = email.message_from_string(
"Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
try:
compat_basestring = basestring # Python 2
except NameError:
@@ -489,6 +531,8 @@ __all__ = [
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
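
Note: once the handler is wired into the opener (see the YoutubeDL.py hunk above), data: URIs resolve like ordinary URLs. A minimal sketch on Python >= 3.4, where the stdlib DataHandler is picked up directly:

import base64
from urllib.request import DataHandler, build_opener

payload = base64.b64encode(b'#EXTM3U\n').decode('ascii')
url = 'data:application/vnd.apple.mpegurl;base64,' + payload

opener = build_opener(DataHandler())
response = opener.open(url)
assert response.read() == b'#EXTM3U\n'
# on Python < 3.4 the compat_urllib_request_DataHandler port above
# provides the same behaviour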

youtube_dl/downloader/hls.py

@@ -30,7 +30,7 @@ class HlsFD(FileDownloader):
args = [ffpp.executable, '-y']
if info_dict['http_headers']:
if info_dict['http_headers'] and re.match(r'^https?://', url):
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
args += [

youtube_dl/extractor/__init__.py

@@ -690,7 +690,7 @@ from .twitch import (
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .twitter import TwitterCardIE, TwitterIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,

youtube_dl/extractor/canalc2.py

@@ -4,38 +4,53 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class Canalc2IE(InfoExtractor):
IE_NAME = 'canalc2.tv'
_VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
_VALID_URL = r'https?://(?:www\.)?canalc2\.tv/video/(?P<id>\d+)'
_TEST = {
'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
'url': 'http://www.canalc2.tv/video/12163',
'md5': '060158428b650f896c542dfbb3d6487f',
'info_dict': {
'id': '12163',
'ext': 'mp4',
'title': 'Terrasses du Numérique'
'ext': 'flv',
'title': 'Terrasses du Numérique',
'duration': 122,
},
'params': {
'skip_download': True, # Requires rtmpdump
}
}
def _real_extract(self, url):
video_id = re.match(self._VALID_URL, url).group('id')
# We need to set the voir field for getting the file name
url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
file_name = self._search_regex(
r"so\.addVariable\('file','(.*?)'\);",
webpage, 'file name')
video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name
video_url = self._search_regex(
r'jwplayer\((["\'])Player\1\)\.setup\({[^}]*file\s*:\s*(["\'])(?P<file>.+?)\2',
webpage, 'video_url', group='file')
formats = [{'url': video_url}]
if video_url.startswith('rtmp://'):
rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
formats[0].update({
'url': rtmp.group('url'),
'ext': 'flv',
'app': rtmp.group('app'),
'play_path': rtmp.group('play_path'),
'page_url': url,
})
title = self._html_search_regex(
r'class="evenement8">(.*?)</a>', webpage, 'title')
r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title')
duration = parse_duration(self._search_regex(
r'id=["\']video_duree["\'][^>]*>([^<]+)',
webpage, 'duration', fatal=False))
return {
'id': video_id,
'ext': 'mp4',
'url': video_url,
'title': title,
'duration': duration,
'formats': formats,
}
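
Note: the rtmp branch splits the stream URL into app and play path for rtmpdump. A quick sketch of the regex from the hunk above (the sample URL is made up):

import re

video_url = 'rtmp://vod-flash.u-strasbg.fr:1935/vod/mp4:courses/2015/video.mp4'
rtmp = re.search(
    r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
print(rtmp.group('url'))        # rtmp://vod-flash.u-strasbg.fr:1935/vod/
print(rtmp.group('app'))        # vod/
print(rtmp.group('play_path'))  # mp4:courses/2015/video.mp4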

youtube_dl/extractor/common.py

@@ -172,6 +172,7 @@ class InfoExtractor(object):
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
@@ -645,7 +646,7 @@ class InfoExtractor(object):
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\'|\s*([^\s"\'=<>`]+?))'
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
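
Note: the content_re change lets quoted og:* values contain '>', which the old [^>]+? classes rejected; that is exactly what the new test_InfoExtractor cases cover. A standalone sketch (both regexes copied from the hunk, template reduced to the content part only):

import re

html = '<meta name="og:test1" content=\'foo > < bar\'/>'

old_content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\'|\s*([^\s"\'=<>`]+?))'
new_content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
template = r'<meta[^>]+?%s'

assert re.search(template % old_content_re, html) is None  # '>' broke the match
assert re.search(template % new_content_re, html).group(2) == 'foo > < bar'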

youtube_dl/extractor/crunchyroll.py

@@ -32,6 +32,26 @@ from ..aes import (
class CrunchyrollBaseIE(InfoExtractor):
_NETRC_MACHINE = 'crunchyroll'
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
self.report_login()
login_url = 'https://www.crunchyroll.com/?a=formhandler'
data = urlencode_postdata({
'formname': 'RpcApiUser_Login',
'name': username,
'password': password,
})
login_request = compat_urllib_request.Request(login_url, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(login_request, None, False, 'Wrong login info')
def _real_initialize(self):
self._login()
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
else compat_urllib_request.Request(url_or_request))
@@ -46,10 +66,22 @@ class CrunchyrollBaseIE(InfoExtractor):
return super(CrunchyrollBaseIE, self)._download_webpage(
request, video_id, note, errnote, fatal, tries, timeout, encoding)
@staticmethod
def _add_skip_wall(url):
parsed_url = compat_urlparse.urlparse(url)
qs = compat_urlparse.parse_qs(parsed_url.query)
# Always force skip_wall to bypass maturity wall, namely 18+ confirmation message:
# > This content may be inappropriate for some people.
# > Are you sure you want to continue?
# since it's not disabled by default in crunchyroll account's settings.
# See https://github.com/rg3/youtube-dl/issues/7202.
qs['skip_wall'] = ['1']
return compat_urlparse.urlunparse(
parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
class CrunchyrollIE(CrunchyrollBaseIE):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_NETRC_MACHINE = 'crunchyroll'
_TESTS = [{
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
'info_dict': {
@@ -81,10 +113,13 @@ class CrunchyrollIE(CrunchyrollBaseIE):
# rtmp
'skip_download': True,
},
}, {
'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
'only_matching': True,
}, {
# geo-restricted (US), 18+ maturity wall, non-premium available
'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
'only_matching': True,
}]
_FORMAT_IDS = {
@@ -94,24 +129,6 @@ class CrunchyrollIE(CrunchyrollBaseIE):
'1080': ('80', '108'),
}
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
self.report_login()
login_url = 'https://www.crunchyroll.com/?a=formhandler'
data = urlencode_postdata({
'formname': 'RpcApiUser_Login',
'name': username,
'password': password,
})
login_request = compat_urllib_request.Request(login_url, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(login_request, None, False, 'Wrong login info')
def _real_initialize(self):
self._login()
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
@@ -254,7 +271,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
else:
webpage_url = 'http://www.' + mobj.group('url')
webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage')
note_m = self._html_search_regex(
r'<div class="showmedia-trailer-notice">(.+?)</div>',
webpage, 'trailer-notice', default='')
@@ -352,7 +369,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
IE_NAME = "crunchyroll:playlist"
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
_TESTS = [{
'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
@@ -361,12 +378,25 @@ class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
},
'playlist_count': 13,
}, {
# geo-restricted (US), 18+ maturity wall, non-premium available
'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
'info_dict': {
'id': 'cosplay-complex-ova',
'title': 'Cosplay Complex OVA'
},
'playlist_count': 3,
'skip': 'Georestricted',
}, {
# geo-restricted (US), 18+ maturity wall, non-premium will be available from 2015.11.14
'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
'only_matching': True,
}]
def _real_extract(self, url):
show_id = self._match_id(url)
webpage = self._download_webpage(url, show_id)
webpage = self._download_webpage(self._add_skip_wall(url), show_id)
title = self._html_search_regex(
r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
webpage, 'title')
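
Note: what _add_skip_wall does to a URL, as a standalone sketch (Python 3 stdlib names instead of the compat aliases used above):

from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

def add_skip_wall(url):
    parsed = urlparse(url)
    qs = parse_qs(parsed.query)
    qs['skip_wall'] = ['1']  # force-bypass the 18+ maturity wall
    return urlunparse(parsed._replace(query=urlencode(qs, doseq=True)))

print(add_skip_wall('http://www.crunchyroll.com/ladies-versus-butlers'))
# -> http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1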

youtube_dl/extractor/dailymotion.py

@@ -96,6 +96,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'uploader': 'HotWaves1012',
'age_limit': 18,
}
},
# geo-restricted, player v5
{
'url': 'http://www.dailymotion.com/video/xhza0o',
'only_matching': True,
}
]
@@ -124,6 +129,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
if player_v5:
player = self._parse_json(player_v5, video_id)
metadata = player['metadata']
self._check_error(metadata)
formats = []
for quality, media_list in metadata['qualities'].items():
for media in media_list:
@@ -201,9 +209,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'video info', flags=re.MULTILINE),
video_id)
if info.get('error') is not None:
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
raise ExtractorError(msg, expected=True)
self._check_error(info)
formats = []
for (key, format_id) in self._FORMATS:
@@ -246,6 +252,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'duration': info['duration']
}
def _check_error(self, info):
if info.get('error') is not None:
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True)
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(

youtube_dl/extractor/eagleplatform.py

@@ -87,7 +87,7 @@ class EaglePlatformIE(InfoExtractor):
m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
formats = self._extract_m3u8_formats(
m3u8_url, video_id,
'mp4', entry_protocol='m3u8_native')
'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
mp4_url = self._get_video_url(
# Secure mp4 URL is constructed according to Player.prototype.mp4 from

youtube_dl/extractor/imdb.py

@@ -4,8 +4,8 @@ import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
from ..utils import (
qualities,
)
@@ -30,24 +30,33 @@ class ImdbIE(InfoExtractor):
descr = self._html_search_regex(
r'(?s)<span itemprop="description">(.*?)</span>',
webpage, 'description', fatal=False)
available_formats = re.findall(
r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
flags=re.MULTILINE)
player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id
player_page = self._download_webpage(
player_url, video_id, 'Downloading player page')
# the player page contains the info for the default format, we have to
# fetch other pages for the rest of the formats
extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page)
format_pages = [
self._download_webpage(
f_url, video_id, 'Downloading info for %s format' % f_name)
for f_url, f_name in extra_formats]
format_pages.append(player_page)
quality = qualities(['SD', '480p', '720p'])
formats = []
for f_id, f_path in available_formats:
f_path = f_path.strip()
format_page = self._download_webpage(
compat_urlparse.urljoin(url, f_path),
'Downloading info for %s format' % f_id)
for format_page in format_pages:
json_data = self._search_regex(
r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
format_page, 'json data', flags=re.DOTALL)
info = json.loads(json_data)
format_info = info['videoPlayerObject']['video']
f_id = format_info['ffname']
formats.append({
'format_id': f_id,
'url': format_info['videoInfoList'][0]['videoUrl'],
'quality': quality(f_id),
})
self._sort_formats(formats)
return {
'id': video_id,
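
Note: format preference now comes from the qualities() helper instead of page order. Its behaviour, for reference (the helper lives in youtube_dl/utils.py):

from youtube_dl.utils import qualities

quality = qualities(['SD', '480p', '720p'])
assert quality('SD') == 0
assert quality('720p') == 2
assert quality('1080p') == -1  # unknown format ids sort lowest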

youtube_dl/extractor/letv.py

@@ -9,13 +9,14 @@ from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
compat_ord,
)
from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
int_or_none,
encode_data_uri,
)
@@ -25,15 +26,16 @@ class LetvIE(InfoExtractor):
_TESTS = [{
'url': 'http://www.letv.com/ptv/vplay/22005890.html',
'md5': 'cab23bd68d5a8db9be31c9a222c1e8df',
'md5': 'edadcfe5406976f42f9f266057ee5e40',
'info_dict': {
'id': '22005890',
'ext': 'mp4',
'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家',
'timestamp': 1424747397,
'upload_date': '20150224',
'description': 'md5:a9cb175fd753e2962176b7beca21a47c',
}
},
'params': {
'hls_prefer_native': True,
},
}, {
'url': 'http://www.letv.com/ptv/vplay/1415246.html',
'info_dict': {
@@ -42,16 +44,22 @@ class LetvIE(InfoExtractor):
'title': '美人天下01',
'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda',
},
'params': {
'hls_prefer_native': True,
},
}, {
'note': 'This video is available only in Mainland China, thus a proxy is needed',
'url': 'http://www.letv.com/ptv/vplay/1118082.html',
'md5': 'f80936fbe20fb2f58648e81386ff7927',
'md5': '2424c74948a62e5f31988438979c5ad1',
'info_dict': {
'id': '1118082',
'ext': 'mp4',
'title': '与龙共舞 完整版',
'description': 'md5:7506a5eeb1722bb9d4068f85024e3986',
},
'params': {
'hls_prefer_native': True,
},
'skip': 'Only available in China',
}]
@@ -74,6 +82,27 @@ class LetvIE(InfoExtractor):
_loc3_ = self.ror(_loc3_, _loc2_ % 17)
return _loc3_
# see M3U8Encryption class in KLetvPlayer.swf
@staticmethod
def decrypt_m3u8(encrypted_data):
if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
return encrypted_data
encrypted_data = encrypted_data[5:]
_loc4_ = bytearray()
while encrypted_data:
b = compat_ord(encrypted_data[0])
_loc4_.extend([b // 16, b & 0x0f])
encrypted_data = encrypted_data[1:]
idx = len(_loc4_) - 11
_loc4_ = _loc4_[idx:] + _loc4_[:idx]
_loc7_ = bytearray()
while _loc4_:
_loc7_.append(_loc4_[0] * 16 + _loc4_[1])
_loc4_ = _loc4_[2:]
return bytes(_loc7_)
def _real_extract(self, url):
media_id = self._match_id(url)
page = self._download_webpage(url, media_id)
@@ -115,23 +144,28 @@ class LetvIE(InfoExtractor):
for format_id in formats:
if format_id in dispatch:
media_url = playurl['domain'][0] + dispatch[format_id][0]
# Mimic what flvxz.com do
url_parts = list(compat_urlparse.urlparse(media_url))
qs = dict(compat_urlparse.parse_qs(url_parts[4]))
qs.update({
'platid': '14',
'splatid': '1401',
'tss': 'no',
'retry': 1
media_url += '&' + compat_urllib_parse.urlencode({
'm3v': 1,
'format': 1,
'expect': 3,
'rateid': format_id,
})
url_parts[4] = compat_urllib_parse.urlencode(qs)
media_url = compat_urlparse.urlunparse(url_parts)
nodes_data = self._download_json(
media_url, media_id,
'Download JSON metadata for format %s' % format_id)
req = self._request_webpage(
nodes_data['nodelist'][0]['location'], media_id,
note='Downloading m3u8 information for format %s' % format_id)
m3u8_data = self.decrypt_m3u8(req.read())
url_info_dict = {
'url': media_url,
'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'),
'ext': determine_ext(dispatch[format_id][1]),
'format_id': format_id,
'protocol': 'm3u8',
}
if format_id[-1:] == 'p':
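
Note: decrypt_m3u8 splits every byte into two nibbles, rotates the nibble stream right by 11 positions, and repacks nibble pairs into bytes. A round-trip sketch (a standalone port of the method above, plus a hypothetical inverse written only to test it):

def decrypt_m3u8(encrypted_data):
    if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
        return encrypted_data
    nibbles = bytearray()
    for b in bytearray(encrypted_data[5:]):
        nibbles.extend([b // 16, b & 0x0f])
    idx = len(nibbles) - 11
    nibbles = nibbles[idx:] + nibbles[:idx]  # rotate right by 11
    out = bytearray()
    for i in range(0, len(nibbles), 2):
        out.append(nibbles[i] * 16 + nibbles[i + 1])
    return bytes(out)

def encrypt_m3u8(plain):  # hypothetical inverse, for testing only
    nibbles = bytearray()
    for b in bytearray(plain):
        nibbles.extend([b // 16, b & 0x0f])
    nibbles = nibbles[11:] + nibbles[:11]  # undo the right rotation
    out = bytearray()
    for i in range(0, len(nibbles), 2):
        out.append(nibbles[i] * 16 + nibbles[i + 1])
    return b'vc_01' + bytes(out)

manifest = b'#EXTM3U\n#EXT-X-ENDLIST'
assert decrypt_m3u8(encrypt_m3u8(manifest)) == manifest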

youtube_dl/extractor/lynda.py

@@ -140,13 +140,14 @@ class LyndaIE(LyndaBaseIE):
prioritized_streams = video_json.get('PrioritizedStreams')
if prioritized_streams:
formats.extend([
{
'url': video_url,
'width': int_or_none(format_id),
'format_id': format_id,
} for format_id, video_url in prioritized_streams['0'].items()
])
for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
formats.extend([
{
'url': video_url,
'width': int_or_none(format_id),
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
} for format_id, video_url in prioritized_stream.items()
])
self._check_formats(formats, video_id)
self._sort_formats(formats)

youtube_dl/extractor/rte.py

@@ -9,16 +9,16 @@ from ..utils import (
class RteIE(InfoExtractor):
_VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
_VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rte.ie/player/de/show/10363114/',
'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
'info_dict': {
'id': '10363114',
'id': '10478715',
'ext': 'mp4',
'title': 'One News',
'title': 'Watch iWitness online',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'The One O\'Clock News followed by Weather.',
'duration': 436.844,
'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.',
'duration': 60.046,
},
'params': {
'skip_download': 'f4m fails with --test atm'

youtube_dl/extractor/twitch.py

@@ -15,6 +15,7 @@ from ..compat import (
compat_urlparse,
)
from ..utils import (
encode_dict,
ExtractorError,
int_or_none,
parse_duration,
@@ -27,8 +28,7 @@ class TwitchBaseIE(InfoExtractor):
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'http://usher.twitch.tv'
_LOGIN_URL = 'https://secure.twitch.tv/login'
_LOGIN_POST_URL = 'https://passport.twitch.tv/authentications/new'
_LOGIN_URL = 'http://www.twitch.tv/login'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
@@ -61,26 +61,28 @@ class TwitchBaseIE(InfoExtractor):
if username is None:
return
login_page = self._download_webpage(
login_page, handle = self._download_webpage_handle(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'login': username.encode('utf-8'),
'password': password.encode('utf-8'),
'username': username,
'password': password,
})
redirect_url = handle.geturl()
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
'post url', default=self._LOGIN_POST_URL, group='url')
'post url', default=redirect_url, group='url')
if not post_url.startswith('http'):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
post_url = compat_urlparse.urljoin(redirect_url, post_url)
request = compat_urllib_request.Request(
post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._LOGIN_URL)
post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
request.add_header('Referer', redirect_url)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -238,14 +240,24 @@ class TwitchVodIE(TwitchItemBaseIE):
def _real_extract(self, url):
item_id = self._match_id(url)
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._download_json(
'%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
'Downloading %s access token' % self._ITEM_TYPE)
formats = self._extract_m3u8_formats(
'%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
% (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
'%s/vod/%s?%s' % (
self._USHER_BASE, item_id,
compat_urllib_parse.urlencode({
'allow_source': 'true',
'allow_spectre': 'true',
'player': 'twitchweb',
'nauth': access_token['token'],
'nauthsig': access_token['sig'],
})),
item_id, 'mp4')
self._prefer_source(formats)
info['formats'] = formats
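
Note: the usher manifest URL is now assembled with urlencode instead of string interpolation and gains the allow_spectre and player parameters. For illustration (the VOD id, token and signature are placeholders):

from urllib.parse import urlencode  # compat_urllib_parse.urlencode above

query = urlencode({
    'allow_source': 'true',
    'allow_spectre': 'true',
    'player': 'twitchweb',
    'nauth': '<token>',
    'nauthsig': '<sig>',
})
print('%s/vod/%s?%s' % ('http://usher.twitch.tv', 'v6528877', query))
# e.g. http://usher.twitch.tv/vod/v6528877?allow_source=true&allow_spectre=true&...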

youtube_dl/extractor/twitter.py

@@ -1,3 +1,4 @@
# coding: utf-8
from __future__ import unicode_literals
import re
@@ -6,23 +7,51 @@ from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
unescapeHTML,
xpath_text,
remove_end,
)
class TwitterCardIE(InfoExtractor):
IE_NAME = 'twitter:card'
_VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
_TEST = {
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
'md5': 'a74f50b310c83170319ba16de6955192',
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': 'TwitterCard',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 30.033,
_TESTS = [
{
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
'md5': '7d2f6b4d2eb841a7ccc893d479bfceb4',
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': 'TwitterCard',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 30.033,
}
},
}
{
'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
'info_dict': {
'id': '623160978427936768',
'ext': 'mp4',
'title': 'TwitterCard',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 80.155,
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
'title': 'Ubuntu 11.10 Overview',
'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10-things-to-do-after-installing-ubuntu-11-10/',
'upload_date': '20111013',
'uploader': 'OMG! Ubuntu!',
'uploader_id': 'omgubuntu',
},
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
@@ -40,10 +69,24 @@ class TwitterCardIE(InfoExtractor):
request.add_header('User-Agent', user_agent)
webpage = self._download_webpage(request, video_id)
config = self._parse_json(
unescapeHTML(self._search_regex(
r'data-player-config="([^"]+)"', webpage, 'data player config')),
youtube_url = self._html_search_regex(
r'<iframe[^>]+src="((?:https?:)?//www.youtube.com/embed/[^"]+)"',
webpage, 'youtube iframe', default=None)
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
config = self._parse_json(self._html_search_regex(
r'data-player-config="([^"]+)"', webpage, 'data player config'),
video_id)
if 'playlist' not in config:
if 'vmapUrl' in config:
vmap_data = self._download_xml(config['vmapUrl'], video_id)
video_url = xpath_text(vmap_data, './/MediaFile').strip()
formats.append({
'url': video_url,
})
break # same video regardless of UA
continue
video_url = config['playlist'][0]['source']
@@ -70,3 +113,54 @@ class TwitterCardIE(InfoExtractor):
'duration': duration,
'formats': formats,
}
class TwitterIE(InfoExtractor):
IE_NAME = 'twitter'
_VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)'
_TEMPLATE_URL = 'https://twitter.com/%s/status/%s'
_TEST = {
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
'md5': '31cd83a116fc41f99ae3d909d4caf6a0',
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 12.922,
'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
'uploader': 'FREE THE NIPPLE',
'uploader_id': 'freethenipple',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user_id = mobj.group('user_id')
twid = mobj.group('id')
webpage = self._download_webpage(self._TEMPLATE_URL % (user_id, twid), twid)
username = remove_end(self._og_search_title(webpage), ' on Twitter')
title = self._og_search_description(webpage).strip('').replace('\n', ' ')
# strip 'https -_t.co_BJYgOjSeGA' junk from filenames
mobj = re.match(r'“(.*)\s+(https?://[^ ]+)”', title)
title, short_url = mobj.groups()
card_id = self._search_regex(
r'["\']/i/cards/tfw/v1/(\d+)', webpage, 'twitter card url')
card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id
return {
'_type': 'url_transparent',
'ie_key': 'TwitterCard',
'uploader_id': user_id,
'uploader': username,
'url': card_url,
'webpage_url': url,
'description': '%s on Twitter: "%s %s"' % (username, title, short_url),
'title': username + ' - ' + title,
}
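
Note: in the vmapUrl branch above, the VMAP document is plain XML and the clip URL sits in a MediaFile element. A minimal sketch with the stdlib parser (the sample document is made up):

import xml.etree.ElementTree as ET

vmap = '''<vmap:VMAP xmlns:vmap="http://www.iab.net/vmap-1.0">
  <MediaFile>
    https://video.twimg.com/amplify_video/sample.mp4
  </MediaFile>
</vmap:VMAP>'''

video_url = ET.fromstring(vmap).find('.//MediaFile').text.strip()
assert video_url == 'https://video.twimg.com/amplify_video/sample.mp4'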

youtube_dl/extractor/vidme.py

@@ -93,6 +93,10 @@ class VidmeIE(InfoExtractor):
'params': {
'skip_download': True,
},
}, {
# nsfw, user-disabled
'url': 'https://vid.me/dzGJ',
'only_matching': True,
}]
def _real_extract(self, url):
@@ -114,6 +118,12 @@ class VidmeIE(InfoExtractor):
video = response['video']
if video.get('state') == 'user-disabled':
raise ExtractorError(
'Vidme said: This video has been suspended either due to a copyright claim, '
'or for violating the terms of use.',
expected=True)
formats = [{
'format_id': f.get('type'),
'url': f['uri'],

youtube_dl/extractor/viewster.py

@@ -131,10 +131,11 @@ class ViewsterIE(InfoExtractor):
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds'))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls',
fatal=False # m3u8 sometimes fail
))
fatal=False) # m3u8 sometimes fail
if m3u8_formats:
formats.extend(m3u8_formats)
else:
format_id = media.get('Bitrate')
f = {

youtube_dl/extractor/vimeo.py

@@ -286,7 +286,17 @@ class VimeoIE(VimeoBaseInfoExtractor):
try:
try:
config_url = self._html_search_regex(
r' data-config-url="(.+?)"', webpage, 'config URL')
r' data-config-url="(.+?)"', webpage,
'config URL', default=None)
if not config_url:
# Sometimes new react-based page is served instead of old one that require
# different config URL extraction approach (see
# https://github.com/rg3/youtube-dl/pull/7209)
vimeo_clip_page_config = self._search_regex(
r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
'vimeo clip page config')
config_url = self._parse_json(
vimeo_clip_page_config, video_id)['player']['config_url']
config_json = self._download_webpage(config_url, video_id)
config = json.loads(config_json)
except RegexNotFoundError:
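
Note: the react fallback above pulls the config URL out of an inline vimeo.clip_page_config assignment. A standalone sketch on a made-up page snippet:

import json
import re

webpage = ('<script>vimeo.clip_page_config = {"player": '
           '{"config_url": "https://player.vimeo.com/video/123/config"}};</script>')
m = re.search(r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage)
config_url = json.loads(m.group(1))['player']['config_url']
assert config_url == 'https://player.vimeo.com/video/123/config'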

youtube_dl/extractor/vine.py

@@ -1,10 +1,14 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..utils import unified_strdate
from ..utils import (
int_or_none,
unified_strdate,
)
class VineIE(InfoExtractor):
@@ -17,10 +21,12 @@ class VineIE(InfoExtractor):
'ext': 'mp4',
'title': 'Chicken.',
'alt_title': 'Vine by Jack Dorsey',
'description': 'Chicken.',
'upload_date': '20130519',
'uploader': 'Jack Dorsey',
'uploader_id': '76',
'like_count': int,
'comment_count': int,
'repost_count': int,
},
}, {
'url': 'https://vine.co/v/MYxVapFvz2z',
@@ -29,11 +35,13 @@ class VineIE(InfoExtractor):
'id': 'MYxVapFvz2z',
'ext': 'mp4',
'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
'alt_title': 'Vine by Luna',
'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
'alt_title': 'Vine by Mars Ruiz',
'upload_date': '20140815',
'uploader': 'Luna',
'uploader': 'Mars Ruiz',
'uploader_id': '1102363502380728320',
'like_count': int,
'comment_count': int,
'repost_count': int,
},
}, {
'url': 'https://vine.co/v/bxVjBbZlPUH',
@@ -43,14 +51,33 @@ class VineIE(InfoExtractor):
'ext': 'mp4',
'title': '#mw3 #ac130 #killcam #angelofdeath',
'alt_title': 'Vine by Z3k3',
'description': '#mw3 #ac130 #killcam #angelofdeath',
'upload_date': '20130430',
'uploader': 'Z3k3',
'uploader_id': '936470460173008896',
'like_count': int,
'comment_count': int,
'repost_count': int,
},
}, {
'url': 'https://vine.co/oembed/MYxVapFvz2z.json',
'only_matching': True,
}, {
'url': 'https://vine.co/v/e192BnZnZ9V',
'info_dict': {
'id': 'e192BnZnZ9V',
'ext': 'mp4',
'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2',
'alt_title': 'Vine by Pimry_zaa',
'upload_date': '20150705',
'uploader': 'Pimry_zaa',
'uploader_id': '1135760698325307392',
'like_count': int,
'comment_count': int,
'repost_count': int,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
@@ -65,25 +92,26 @@ class VineIE(InfoExtractor):
formats = [{
'format_id': '%(format)s-%(rate)s' % f,
'vcodec': f['format'],
'quality': f['rate'],
'vcodec': f.get('format'),
'quality': f.get('rate'),
'url': f['videoUrl'],
} for f in data['videoUrls']]
} for f in data['videoUrls'] if f.get('videoUrl')]
self._sort_formats(formats)
username = data.get('username')
return {
'id': video_id,
'title': self._og_search_title(webpage),
'alt_title': self._og_search_description(webpage, default=None),
'description': data['description'],
'thumbnail': data['thumbnailUrl'],
'upload_date': unified_strdate(data['created']),
'uploader': data['username'],
'uploader_id': data['userIdStr'],
'like_count': data['likes']['count'],
'comment_count': data['comments']['count'],
'repost_count': data['reposts']['count'],
'title': data.get('description') or self._og_search_title(webpage),
'alt_title': 'Vine by %s' % username if username else self._og_search_description(webpage, default=None),
'thumbnail': data.get('thumbnailUrl'),
'upload_date': unified_strdate(data.get('created')),
'uploader': username,
'uploader_id': data.get('userIdStr'),
'like_count': int_or_none(data.get('likes', {}).get('count')),
'comment_count': int_or_none(data.get('comments', {}).get('count')),
'repost_count': int_or_none(data.get('reposts', {}).get('count')),
'formats': formats,
}

youtube_dl/extractor/youtube.py

@@ -178,6 +178,52 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
return
class YoutubePlaylistBaseInfoExtractor(InfoExtractor):
# Extract the video ids from the playlist pages
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for video_id, video_title in self.extract_videos_from_page(content_html):
yield self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(self._VIDEO_RE, page):
# The link with index 0 is not the first video of the playlist (not sure if this still applies)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
if video_title:
video_title = video_title.strip()
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
@@ -1419,7 +1465,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
@@ -1440,7 +1486,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
@@ -1557,37 +1603,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
else:
self.report_warning('Youtube gives an alert message: ' + match)
# Extract the video ids from the playlist pages
def _entries():
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.finditer(self._VIDEO_RE, content_html)
# We remove the duplicates and the link with index 0
# (it's not the first video of the playlist)
new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
for vid_id in new_ids:
yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
page, 'title')
return self.playlist_result(_entries(), playlist_id, playlist_title)
return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
def _real_extract(self, url):
# Extract playlist id
@@ -1613,10 +1633,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
return self._extract_playlist(playlist_id)
class YoutubeChannelIE(InfoExtractor):
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
@@ -1627,22 +1648,6 @@ class YoutubeChannelIE(InfoExtractor):
}
}]
@staticmethod
def extract_videos_from_page(page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page):
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
def _real_extract(self, url):
channel_id = self._match_id(url)
@@ -1685,29 +1690,7 @@ class YoutubeChannelIE(InfoExtractor):
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
def _entries():
more_widget_html = content_html = channel_page
for pagenum in itertools.count(1):
for video_id, video_title in self.extract_videos_from_page(content_html):
yield self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
mobj = re.search(
r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), channel_id,
'Downloading page #%s' % (pagenum + 1),
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(_entries(), channel_id)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):

youtube_dl/utils.py

@@ -3,6 +3,7 @@
from __future__ import unicode_literals
import base64
import calendar
import codecs
import contextlib
@@ -1795,6 +1796,10 @@ def urlhandle_detect_ext(url_handle):
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """

youtube_dl/version.py

@@ -1,3 +1,3 @@
from __future__ import unicode_literals
__version__ = '2015.10.16'
__version__ = '2015.10.18'