Compare commits: 2017.03.10...2017.03.15

13 Commits:

- 5db83d79bf
- 2a751e137f
- 398887b4c0
- 66bf351f80
- 9d08963022
- e313d209c2
- ff9d509d20
- c1795ca6c8
- 8c99623259
- 57b0ddb35f
- a28f8d7396
- 7049799470
- 4605c94d1a
.github/ISSUE_TEMPLATE.md (vendored): 6 changes
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.10*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.10**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.15*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.15**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections

@@ -35,7 +35,7 @@ $ youtube-dl -v <your command line>
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2017.03.10
+[debug] youtube-dl version 2017.03.15
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
ChangeLog: 15 changes
@@ -1,3 +1,18 @@
+version 2017.03.15
+
+Core
+* Fix missing subtitles if --add-metadata is used (#12423)
+
+Extractors
+* [facebook] Make title optional (#12443)
++ [mitele] Add support for ooyala videos (#12430)
+* [openload] Fix extraction (#12435, #12446)
+* [streamable] Update API URL (#12433)
++ [crunchyroll] Extract season name (#12428)
+* [discoverygo] Bypass geo restriction
++ [discoverygo:playlist] Add support for playlists (#12424)
+
+
 version 2017.03.10
 
 Extractors
@@ -208,6 +208,7 @@
 - **Digiteka**
 - **Discovery**
 - **DiscoveryGo**
+- **DiscoveryGoPlaylist**
 - **Disney**
 - **Dotsub**
 - **DouyuTV**: 斗鱼
@@ -259,6 +259,16 @@ def _real_main(argv=None):
             'key': 'FFmpegVideoConvertor',
             'preferedformat': opts.recodevideo,
         })
+    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
+    # FFmpegExtractAudioPP as containers before conversion may not support
+    # metadata (3gp, webm, etc.)
+    # And this post-processor should be placed before other metadata
+    # manipulating post-processors (FFmpegEmbedSubtitle) to prevent loss of
+    # extra metadata. By default ffmpeg preserves metadata applicable for both
+    # source and target containers. From this point the container won't change,
+    # so metadata can be added here.
+    if opts.addmetadata:
+        postprocessors.append({'key': 'FFmpegMetadata'})
     if opts.convertsubtitles:
         postprocessors.append({
             'key': 'FFmpegSubtitlesConvertor',

@@ -276,11 +286,6 @@ def _real_main(argv=None):
         })
         if not already_have_thumbnail:
             opts.writethumbnail = True
-    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
-    # FFmpegExtractAudioPP as containers before conversion may not support
-    # metadata (3gp, webm, etc.)
-    if opts.addmetadata:
-        postprocessors.append({'key': 'FFmpegMetadata'})
     # XAttrMetadataPP should be run after post-processors that may change file
     # contents
     if opts.xattrs:
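The comment block added here states the ordering constraint in prose; the sketch below is hypothetical (not taken from youtube-dl itself) and only illustrates the queue that results when video recoding, `--add-metadata` and subtitle embedding are combined, with plain variables standing in for the real `opts` object.

```python
# Minimal sketch of the post-processor ordering enforced by the change above.
opts_recodevideo = 'mkv'      # stand-ins for the real opts object
opts_addmetadata = True
opts_embedsubtitles = True

postprocessors = []
if opts_recodevideo:
    postprocessors.append({'key': 'FFmpegVideoConvertor'})
# Metadata is written once the final container is known, but before
# FFmpegEmbedSubtitle rewrites the file, so the extra metadata is not lost.
if opts_addmetadata:
    postprocessors.append({'key': 'FFmpegMetadata'})
if opts_embedsubtitles:
    postprocessors.append({'key': 'FFmpegEmbedSubtitle'})

print([pp['key'] for pp in postprocessors])
# ['FFmpegVideoConvertor', 'FFmpegMetadata', 'FFmpegEmbedSubtitle']
```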
@@ -177,6 +177,7 @@ class CrunchyrollIE(CrunchyrollBaseIE):
             'uploader': 'Kadokawa Pictures Inc.',
             'upload_date': '20170118',
             'series': "KONOSUBA -God's blessing on this wonderful world!",
+            'season': "KONOSUBA -God's blessing on this wonderful world! 2",
             'season_number': 2,
             'episode': 'Give Me Deliverance from this Judicial Injustice!',
             'episode_number': 1,

@@ -222,6 +223,23 @@ class CrunchyrollIE(CrunchyrollBaseIE):
             # just test metadata extraction
             'skip_download': True,
         },
+    }, {
+        # A video with a vastly different season name compared to the series name
+        'url': 'http://www.crunchyroll.com/nyarko-san-another-crawling-chaos/episode-1-test-590532',
+        'info_dict': {
+            'id': '590532',
+            'ext': 'mp4',
+            'title': 'Haiyoru! Nyaruani (ONA) Episode 1 – Test',
+            'description': 'Mahiro and Nyaruko talk about official certification.',
+            'uploader': 'TV TOKYO',
+            'upload_date': '20120305',
+            'series': 'Nyarko-san: Another Crawling Chaos',
+            'season': 'Haiyoru! Nyaruani (ONA)',
+        },
+        'params': {
+            # Just test metadata extraction
+            'skip_download': True,
+        },
     }]
 
     _FORMAT_IDS = {

@@ -491,7 +509,8 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         # webpage provide more accurate data than series_title from XML
         series = self._html_search_regex(
             r'id=["\']showmedia_about_episode_num[^>]+>\s*<a[^>]+>([^<]+)',
-            webpage, 'series', default=xpath_text(metadata, 'series_title'))
+            webpage, 'series', fatal=False)
+        season = xpath_text(metadata, 'series_title')
 
         episode = xpath_text(metadata, 'episode_title')
         episode_number = int_or_none(xpath_text(metadata, 'episode_number'))

@@ -508,6 +527,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             'uploader': video_uploader,
             'upload_date': video_upload_date,
             'series': series,
+            'season': season,
             'season_number': season_number,
             'episode': episode,
             'episode_number': episode_number,
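The season change above splits the two sources: the page markup supplies the series name, while the XML `series_title` (which Crunchyroll fills with the season) is kept as `season`. Here is a self-contained sketch of that split using the Nyarko-san test case from the diff; the HTML and XML snippets are invented stand-ins, and only the regex is the one added above.

```python
# Illustration of the series/season split; the markup below is hypothetical.
import re
import xml.etree.ElementTree as ET

webpage = '<h4 id="showmedia_about_episode_num"><a href="#">Nyarko-san: Another Crawling Chaos</a></h4>'
metadata = ET.fromstring(
    '<media><series_title>Haiyoru! Nyaruani (ONA)</series_title></media>')

# Series comes from the page (more accurate), season from the XML metadata.
series = re.search(
    r'id=["\']showmedia_about_episode_num[^>]+>\s*<a[^>]+>([^<]+)',
    webpage).group(1)
season = metadata.findtext('series_title')

print(series)   # Nyarko-san: Another Crawling Chaos
print(season)   # Haiyoru! Nyaruani (ONA)
```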
@@ -1,17 +1,21 @@
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
     extract_attributes,
+    ExtractorError,
     int_or_none,
     parse_age_limit,
-    ExtractorError,
+    remove_end,
+    unescapeHTML,
 )
 
 
-class DiscoveryGoIE(InfoExtractor):
-    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:
+class DiscoveryGoBaseIE(InfoExtractor):
+    _VALID_URL_TEMPLATE = r'''(?x)https?://(?:www\.)?(?:
         discovery|
         investigationdiscovery|
         discoverylife|

@@ -21,18 +25,23 @@ class DiscoveryGoIE(InfoExtractor):
         sciencechannel|
         tlc|
         velocitychannel
-    )go\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'''
+    )go\.com/%s(?P<id>[^/?#&]+)'''
+
+
+class DiscoveryGoIE(DiscoveryGoBaseIE):
+    _VALID_URL = DiscoveryGoBaseIE._VALID_URL_TEMPLATE % r'(?:[^/]+/)+'
+    _GEO_COUNTRIES = ['US']
     _TEST = {
-        'url': 'https://www.discoverygo.com/love-at-first-kiss/kiss-first-ask-questions-later/',
+        'url': 'https://www.discoverygo.com/bering-sea-gold/reaper-madness/',
         'info_dict': {
-            'id': '57a33c536b66d1cd0345eeb1',
+            'id': '58c167d86b66d12f2addeb01',
             'ext': 'mp4',
-            'title': 'Kiss First, Ask Questions Later!',
-            'description': 'md5:fe923ba34050eae468bffae10831cb22',
-            'duration': 2579,
-            'series': 'Love at First Kiss',
-            'season_number': 1,
-            'episode_number': 1,
+            'title': 'Reaper Madness',
+            'description': 'md5:09f2c625c99afb8946ed4fb7865f6e78',
+            'duration': 2519,
+            'series': 'Bering Sea Gold',
+            'season_number': 8,
+            'episode_number': 6,
             'age_limit': 14,
         },
     }

@@ -113,3 +122,46 @@ class DiscoveryGoIE(InfoExtractor):
             'formats': formats,
             'subtitles': subtitles,
         }
+
+
+class DiscoveryGoPlaylistIE(DiscoveryGoBaseIE):
+    _VALID_URL = DiscoveryGoBaseIE._VALID_URL_TEMPLATE % ''
+    _TEST = {
+        'url': 'https://www.discoverygo.com/bering-sea-gold/',
+        'info_dict': {
+            'id': 'bering-sea-gold',
+            'title': 'Bering Sea Gold',
+            'description': 'md5:cc5c6489835949043c0cc3ad66c2fa0e',
+        },
+        'playlist_mincount': 6,
+    }
+
+    @classmethod
+    def suitable(cls, url):
+        return False if DiscoveryGoIE.suitable(url) else super(
+            DiscoveryGoPlaylistIE, cls).suitable(url)
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        entries = []
+        for mobj in re.finditer(r'data-json=(["\'])(?P<json>{.+?})\1', webpage):
+            data = self._parse_json(
+                mobj.group('json'), display_id,
+                transform_source=unescapeHTML, fatal=False)
+            if not isinstance(data, dict) or data.get('type') != 'episode':
+                continue
+            episode_url = data.get('socialUrl')
+            if not episode_url:
+                continue
+            entries.append(self.url_result(
+                episode_url, ie=DiscoveryGoIE.ie_key(),
+                video_id=data.get('id')))
+
+        return self.playlist_result(
+            entries, display_id,
+            remove_end(self._og_search_title(
+                webpage, fatal=False), ' | Discovery GO'),
+            self._og_search_description(webpage))
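DiscoveryGoPlaylistIE reuses the URL template with an empty path prefix, and its `suitable()` override keeps it from claiming episode pages. The sketch below illustrates how the two patterns divide the URL space; `BASE` collapses the real template to the discoverygo.com domain only (the real one carries the whole channel alternation shown above), and the URLs are taken from the tests in the diff.

```python
# Rough illustration of the URL split between episode and playlist extractors.
import re

BASE = r'https?://(?:www\.)?discoverygo\.com/%s(?P<id>[^/?#&]+)'
EPISODE = BASE % r'(?:[^/]+/)+'   # needs at least one show segment before the id
PLAYLIST = BASE % ''              # bare show page

episode_url = 'https://www.discoverygo.com/bering-sea-gold/reaper-madness/'
playlist_url = 'https://www.discoverygo.com/bering-sea-gold/'

print(bool(re.match(EPISODE, episode_url)))    # True
print(bool(re.match(EPISODE, playlist_url)))   # False
print(bool(re.match(PLAYLIST, playlist_url)))  # True
# PLAYLIST also matches episode URLs, which is why DiscoveryGoPlaylistIE
# overrides suitable() to step aside whenever DiscoveryGoIE matches.
print(bool(re.match(PLAYLIST, episode_url)))   # True
```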
@@ -265,7 +265,10 @@ from .dvtv import DVTVIE
 from .dumpert import DumpertIE
 from .defense import DefenseGouvFrIE
 from .discovery import DiscoveryIE
-from .discoverygo import DiscoveryGoIE
+from .discoverygo import (
+    DiscoveryGoIE,
+    DiscoveryGoPlaylistIE,
+)
 from .disney import DisneyIE
 from .dispeak import DigitallySpeakingIE
 from .dropbox import DropboxIE
@@ -196,6 +196,10 @@ class FacebookIE(InfoExtractor):
     }, {
         'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670',
         'only_matching': True,
+    }, {
+        # no title
+        'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/',
+        'only_matching': True,
     }]
 
     @staticmethod

@@ -353,15 +357,15 @@ class FacebookIE(InfoExtractor):
         self._sort_formats(formats)
 
         video_title = self._html_search_regex(
-            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
-            default=None)
+            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage,
+            'title', default=None)
         if not video_title:
             video_title = self._html_search_regex(
                 r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                 webpage, 'alternative title', default=None)
         if not video_title:
             video_title = self._html_search_meta(
-                'description', webpage, 'title')
+                'description', webpage, 'title', default=None)
         if video_title:
             video_title = limit_length(video_title, 80)
         else:
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import uuid
 
 from .common import InfoExtractor
+from .ooyala import OoyalaIE
 from ..compat import (
     compat_str,
     compat_urllib_parse_urlencode,

@@ -24,6 +25,9 @@ class MiTeleBaseIE(InfoExtractor):
             r'(?s)(<ms-video-player.+?</ms-video-player>)',
             webpage, 'ms video player'))
         video_id = player_data['data-media-id']
+        if player_data.get('data-cms-id') == 'ooyala':
+            return self.url_result(
+                'ooyala:%s' % video_id, ie=OoyalaIE.ie_key(), video_id=video_id)
         config_url = compat_urlparse.urljoin(url, player_data['data-config'])
         config = self._download_json(
             config_url, video_id, 'Downloading config JSON')
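The new branch routes Ooyala-hosted videos straight to OoyalaIE instead of downloading the MiTele config. A small illustration of that routing using youtube-dl's `extract_attributes`; the `<ms-video-player>` markup is invented, and the real extractor returns `self.url_result()` rather than printing.

```python
# Sketch of the ooyala routing added above (hypothetical markup).
from youtube_dl.utils import extract_attributes

player = ('<ms-video-player data-media-id="abc123" data-cms-id="ooyala" '
          'data-config="/config.json"></ms-video-player>')
player_data = extract_attributes(player)

video_id = player_data['data-media-id']
if player_data.get('data-cms-id') == 'ooyala':
    print('ooyala:%s' % video_id)  # would be delegated to OoyalaIE
```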
@@ -96,14 +96,16 @@
         h = 0
 
         while h < len(v):
-            B = v[h:h + 2]
+            B = v[h:h + 3]
-            i = int(B, 16)
-            index = (h / 2) % 10
+            if (h / 3) % 3 == 0:
+                i = int(B, 8)
+            index = (h / 3) % 10
             A = hashMap[index]
-            i = i ^ 137
+            i = i ^ 47
             i = i ^ A
             video_url_chars.append(compat_chr(i))
-            h += 2
+            h += 3
 
         video_url = 'https://openload.co/stream/%s?mime=true'
         video_url = video_url % (''.join(video_url_chars))
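Only part of the new Openload decoding routine is visible in this hunk (whatever runs when `(h / 3) % 3` is non-zero is not shown), so the sketch below merely assembles the added lines into something runnable for illustration: it folds the conditional into a plain octal parse and is not a faithful copy of the extractor. `v` and `hash_map` are hypothetical inputs.

```python
# Illustration only: 3-character octal chunks, XORed with 47 and a per-position
# value from the hash map (previously 2-character hex chunks XORed with 137).
def decode(v, hash_map):
    video_url_chars = []
    h = 0
    while h < len(v):
        B = v[h:h + 3]
        i = int(B, 8)            # 3-character octal chunk (was 2-char hex)
        index = (h // 3) % 10    # // here; the diff targets Python 2's /
        A = hash_map[index]
        i = i ^ 47               # was 137
        i = i ^ A
        video_url_chars.append(chr(i))
        h += 3                   # was 2
    return ''.join(video_url_chars)

print(decode('107112103103100', [0] * 10))  # 'hello' with a zeroed hash map (toy input)
```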
@@ -65,7 +65,7 @@ class StreamableIE(InfoExtractor):
         # to return video info like the title properly sometimes, and doesn't
         # include info like the video duration
         video = self._download_json(
-            'https://streamable.com/ajax/videos/%s' % video_id, video_id)
+            'https://ajax.streamable.com/videos/%s' % video_id, video_id)
 
         # Format IDs:
         # 0 The video is being uploaded
@@ -44,6 +44,10 @@ class TelecincoIE(MiTeleBaseIE):
     }, {
         'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html',
         'only_matching': True,
+    }, {
+        # ooyala video
+        'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2017.03.10'
+__version__ = '2017.03.15'
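The bumped string in version.py is what `youtube-dl --version` (and the `[debug]` banner requested by the issue template above) reports. A quick check from Python, assuming the 2017.03.15 release is installed:

```python
# Prints the installed release string, e.g. 2017.03.15 for this tag.
from youtube_dl.version import __version__

print(__version__)
```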