Compare commits

60 commits: 2014.09.24...2014.09.29
Commits:

a43ee88c6f, e2dce53781, 1770ed9e86, 457ac58cc7, 9c44d2429b, d2e32f7df5,
67077b182b, 5f4c318844, dfee83234b, 7f5c0c4a19, 4bc77c8417, 22dd3fad86,
d6e6a42256, 76e7d1e74b, 38c4d41b74, f0b8e3607d, 51ee08c4bb, c841789772,
c121a75b36, 5a8b77551d, 0217aee154, b14f3a4c1d, 92f7963f6e, 88fbe4c2cc,
394599f422, ed9266db90, f4b1c7adb8, c95eeb7b80, 5e43e3803c, a89435a7a8,
a0a90b3ba1, c664182323, 6be1cd4ddb, ee0d90707a, f776d8f608, b3ac3a51ac,
0b75c2a88b, 7b7518124e, 68b0973046, 3a203b8bfa, 70752ccefd, 0155549d6c,
b66745288e, 2a1325fdde, 2f9e8776df, 497339fa0e, 8e6f8051f0, 11b3ce8509,
6a5af6acb9, 9a0d98bb40, fbd3162e49, 54e9a4af95, 8a32b82e46, fec02bcc90,
c6e90caaa6, 4bbf157794, 6b08cdf626, b686fc18da, 746c67d72f, 5aa38e75b2
README.md

```diff
@@ -442,8 +442,6 @@ If you want to add support for a new site, you can follow this quick list (assum
     # coding: utf-8
     from __future__ import unicode_literals
 
-    import re
-
     from .common import InfoExtractor
 
 
@@ -451,7 +449,7 @@ If you want to add support for a new site, you can follow this quick list (assum
         _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
         _TEST = {
             'url': 'http://yourextractor.com/watch/42',
-            'md5': 'TODO: md5 sum of the first 10KiB of the video file',
+            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
             'info_dict': {
                 'id': '42',
                 'ext': 'mp4',
@@ -466,8 +464,7 @@ If you want to add support for a new site, you can follow this quick list (assum
         }
 
         def _real_extract(self, url):
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('id')
+            video_id = self._match_id(url)
 
             # TODO more code goes here, for example ...
             webpage = self._download_webpage(url, video_id)
```
test/test_download.py

```diff
@@ -139,7 +139,9 @@ def generator(test_case):
 
         if is_playlist:
             self.assertEqual(res_dict['_type'], 'playlist')
+            self.assertTrue('entries' in res_dict)
             expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
 
         if 'playlist_mincount' in test_case:
             assertGreaterEqual(
                 self,
@@ -188,7 +190,7 @@ def generator(test_case):
                 expect_info_dict(self, tc.get('info_dict', {}), info_dict)
         finally:
             try_rm_tcs_files()
-        if is_playlist and res_dict is not None:
+        if is_playlist and res_dict is not None and res_dict.get('entries'):
            # Remove all other files that may have been extracted if the
            # extractor returns full results even with extract_flat
            res_tcs = [{'info_dict': e} for e in res_dict['entries']]
```
test/test_utils.py

```diff
@@ -22,7 +22,8 @@ from youtube_dl.utils import (
     fix_xml_ampersands,
     get_meta_content,
     orderedSet,
-    PagedList,
+    OnDemandPagedList,
+    InAdvancePagedList,
     parse_duration,
     read_batch_urls,
     sanitize_filename,
@@ -246,10 +247,14 @@ class TestUtil(unittest.TestCase):
                 for i in range(firstid, upto):
                     yield i
 
-            pl = PagedList(get_page, pagesize)
+            pl = OnDemandPagedList(get_page, pagesize)
             got = pl.getslice(*sliceargs)
             self.assertEqual(got, expected)
 
+            iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
+            got = iapl.getslice(*sliceargs)
+            self.assertEqual(got, expected)
+
         testPL(5, 2, (), [0, 1, 2, 3, 4])
         testPL(5, 2, (1,), [1, 2, 3, 4])
         testPL(5, 2, (2,), [2, 3, 4])
```
youtube_dl/YoutubeDL.py

```diff
@@ -1250,12 +1250,13 @@ class YoutubeDL(object):
         # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
         # To work around aforementioned issue we will replace request's original URL with
         # percent-encoded one
-        url = req if isinstance(req, compat_str) else req.get_full_url()
+        req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
+        url = req if req_is_string else req.get_full_url()
         url_escaped = escape_url(url)
 
         # Substitute URL if any change after escaping
         if url != url_escaped:
-            if isinstance(req, compat_str):
+            if req_is_string:
                 req = url_escaped
             else:
                 req = compat_urllib_request.Request(
```
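The `req_is_string` change matters on Python 2, where a URL can arrive either as `str` (bytes) or as `unicode` (`compat_str`); checking only `compat_str` missed the bytes case. A minimal standalone restatement of the check (not the project's code, just the same logic):

```python
import sys

def is_string_url(req):
    # Python 2: URLs may be `str` (bytes) or `unicode`; `basestring` covers both.
    # Python 3: `str` is the only text type, so a plain isinstance suffices.
    if sys.version_info < (3, 0):
        return isinstance(req, basestring)  # noqa: F821 -- Python 2 only
    return isinstance(req, str)
```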
youtube_dl/__init__.py

```diff
@@ -78,6 +78,7 @@ __authors__ = (
     'Hari Padmanaban',
     'Carlos Ramos',
     '5moufl',
+    'lenaten',
 )
 
 __license__ = 'Public Domain'
```
youtube_dl/downloader/common.py

```diff
@@ -42,6 +42,7 @@ class FileDownloader(object):
     Subclasses of this one must re-define the real_download method.
     """
 
+    _TEST_FILE_SIZE = 10241
     params = None
 
     def __init__(self, ydl, params):
```
youtube_dl/downloader/hls.py

```diff
@@ -7,6 +7,7 @@ import subprocess
 from .common import FileDownloader
 from ..utils import (
     compat_urlparse,
+    compat_urllib_request,
     check_executable,
     encodeFilename,
 )
@@ -71,15 +72,26 @@ class NativeHlsFD(FileDownloader):
                        else compat_urlparse.urljoin(url, line))
             segment_urls.append(segment_url)
 
+        is_test = self.params.get('test', False)
+        remaining_bytes = self._TEST_FILE_SIZE if is_test else None
         byte_counter = 0
         with open(tmpfilename, 'wb') as outf:
             for i, segurl in enumerate(segment_urls):
-                segment = self.ydl.urlopen(segurl).read()
-                outf.write(segment)
-                byte_counter += len(segment)
                 self.to_screen(
                     '[hlsnative] %s: Downloading segment %d / %d' %
                     (info_dict['id'], i + 1, len(segment_urls)))
+                seg_req = compat_urllib_request.Request(segurl)
+                if remaining_bytes is not None:
+                    seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
+
+                segment = self.ydl.urlopen(seg_req).read()
+                if remaining_bytes is not None:
+                    segment = segment[:remaining_bytes]
+                    remaining_bytes -= len(segment)
+                outf.write(segment)
+                byte_counter += len(segment)
+                if remaining_bytes is not None and remaining_bytes <= 0:
+                    break
 
         self._hook_progress({
             'downloaded_bytes': byte_counter,
```
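This is what makes `--test` work for the native HLS downloader: with `test` set, the download budget is `_TEST_FILE_SIZE` (10241 bytes, matching the README's updated md5 note), requested via an HTTP `Range` header and also enforced client-side in case the server ignores the header. A minimal sketch of the same byte-budget pattern outside youtube-dl (function name and arguments are illustrative):

```python
import urllib.request

TEST_FILE_SIZE = 10241  # same budget the diff moves into FileDownloader

def download_capped(segment_urls, out_path, limit=TEST_FILE_SIZE):
    """Write segments to out_path until `limit` bytes (limit=None: no cap)."""
    remaining = limit
    with open(out_path, 'wb') as outf:
        for segurl in segment_urls:
            req = urllib.request.Request(segurl)
            if remaining is not None:
                # Ask the server for only the bytes still needed.
                req.add_header('Range', 'bytes=0-%d' % (remaining - 1))
            data = urllib.request.urlopen(req).read()
            if remaining is not None:
                data = data[:remaining]  # some servers ignore Range
                remaining -= len(data)
            outf.write(data)
            if remaining is not None and remaining <= 0:
                break
```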
youtube_dl/downloader/http.py

```diff
@@ -14,8 +14,6 @@ from ..utils import (
 
 
 class HttpFD(FileDownloader):
-    _TEST_FILE_SIZE = 10241
-
     def real_download(self, filename, info_dict):
         url = info_dict['url']
         tmpfilename = self.temp_name(filename)
```
youtube_dl/extractor/__init__.py

```diff
@@ -135,12 +135,14 @@ from .gametrailers import GametrailersIE
 from .gdcvault import GDCVaultIE
 from .generic import GenericIE
 from .godtube import GodTubeIE
+from .golem import GolemIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .gorillavid import GorillaVidIE
 from .goshgay import GoshgayIE
 from .grooveshark import GroovesharkIE
 from .hark import HarkIE
+from .heise import HeiseIE
 from .helsinki import HelsinkiIE
 from .hentaistigma import HentaiStigmaIE
 from .hornbunny import HornBunnyIE
@@ -261,6 +263,7 @@ from .nrk import (
 from .ntv import NTVIE
 from .nytimes import NYTimesIE
 from .nuvid import NuvidIE
+from .oktoberfesttv import OktoberfestTVIE
 from .ooyala import OoyalaIE
 from .orf import (
     ORFTVthekIE,
@@ -271,6 +274,7 @@ from .parliamentliveuk import ParliamentLiveUKIE
 from .patreon import PatreonIE
 from .pbs import PBSIE
 from .photobucket import PhotobucketIE
+from .played import PlayedIE
 from .playfm import PlayFMIE
 from .playvid import PlayvidIE
 from .podomatic import PodomaticIE
@@ -339,6 +343,7 @@ from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE, SpiegelArticleIE
 from .spiegeltv import SpiegeltvIE
 from .spike import SpikeIE
+from .sport5 import Sport5IE
 from .sportdeutschland import SportDeutschlandIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
@@ -366,7 +371,10 @@ from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
 from .tlc import TlcIE, TlcDeIE
 from .tnaflix import TNAFlixIE
-from .thvideo import THVideoIE
+from .thvideo import (
+    THVideoIE,
+    THVideoPlaylistIE
+)
 from .toutv import TouTvIE
 from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
@@ -407,11 +415,12 @@ from .videoweed import VideoWeedIE
 from .vidme import VidmeIE
 from .vimeo import (
     VimeoIE,
-    VimeoChannelIE,
-    VimeoUserIE,
     VimeoAlbumIE,
+    VimeoChannelIE,
     VimeoGroupsIE,
+    VimeoLikesIE,
     VimeoReviewIE,
+    VimeoUserIE,
     VimeoWatchLaterIE,
 )
 from .vimple import VimpleIE
@@ -450,6 +459,7 @@ from .yahoo import (
     YahooNewsIE,
     YahooSearchIE,
 )
+from .ynet import YnetIE
 from .youjizz import YouJizzIE
 from .youku import YoukuIE
 from .youporn import YouPornIE
```
youtube_dl/extractor/abc.py

```diff
@@ -22,8 +22,7 @@ class ABCIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         urls_info_json = self._search_regex(
```
youtube_dl/extractor/anysex.py

```diff
@@ -35,7 +35,7 @@ class AnySexIE(InfoExtractor):
 
         title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
         description = self._html_search_regex(
-            r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
+            r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
         thumbnail = self._html_search_regex(
             r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
 
@@ -43,7 +43,7 @@ class AnySexIE(InfoExtractor):
             r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
 
         duration = parse_duration(self._search_regex(
-            r'<b>Duration:</b> (\d+:\d+)', webpage, 'duration', fatal=False))
+            r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
         view_count = int_or_none(self._html_search_regex(
             r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
```
```diff
@@ -8,8 +8,6 @@ from ..utils import (
     determine_ext,
     ExtractorError,
     qualities,
     compat_urllib_parse_urlparse,
     compat_urllib_parse,
     int_or_none,
     parse_duration,
     unified_strdate,
```
youtube_dl/extractor/common.py

```diff
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals
 
 import base64
+import datetime
 import hashlib
 import json
 import netrc
@@ -21,6 +22,7 @@ from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
+    float_or_none,
     int_or_none,
     RegexNotFoundError,
     sanitize_filename,
@@ -164,6 +166,14 @@ class InfoExtractor(object):
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
         return cls._VALID_URL_RE.match(url) is not None
 
+    @classmethod
+    def _match_id(cls, url):
+        if '_VALID_URL_RE' not in cls.__dict__:
+            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
+        m = cls._VALID_URL_RE.match(url)
+        assert m
+        return m.group('id')
+
     @classmethod
     def working(cls):
         """Getter method for _WORKING."""
@@ -705,6 +715,34 @@ class InfoExtractor(object):
         self._sort_formats(formats)
         return formats
 
+    def _live_title(self, name):
+        """ Generate the title for a live video """
+        now = datetime.datetime.now()
+        now_str = now.strftime("%Y-%m-%d %H:%M")
+        return name + ' ' + now_str
+
+    def _int(self, v, name, fatal=False, **kwargs):
+        res = int_or_none(v, **kwargs)
+        if 'get_attr' in kwargs:
+            print(getattr(v, kwargs['get_attr']))
+        if res is None:
+            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+            if fatal:
+                raise ExtractorError(msg)
+            else:
+                self._downloader.report_warning(msg)
+        return res
+
+    def _float(self, v, name, fatal=False, **kwargs):
+        res = float_or_none(v, **kwargs)
+        if res is None:
+            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+            if fatal:
+                raise ExtractorError(msg)
+            else:
+                self._downloader.report_warning(msg)
+        return res
+
 
 class SearchInfoExtractor(InfoExtractor):
     """
```
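The new `_match_id` classmethod removes the `re.match(self._VALID_URL, url)` / `mobj.group('id')` pair that nearly every extractor repeated (the abc.py and thvideo.py hunks in this compare are typical call sites). A standalone restatement of what the helper does (the example URL pattern is made up):

```python
import re

_VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'

def match_id(valid_url, url):
    """Equivalent of InfoExtractor._match_id, without the regex cache."""
    m = re.match(valid_url, url)
    assert m  # the caller is expected to have checked suitable(url) already
    return m.group('id')

print(match_id(_VALID_URL, 'http://example.com/watch/42'))  # -> 42
```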
youtube_dl/extractor/crunchyroll.py

```diff
@@ -9,7 +9,7 @@ import xml.etree.ElementTree
 
 from hashlib import sha1
 from math import pow, sqrt, floor
-from .common import InfoExtractor
+from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
     ExtractorError,
     compat_urllib_parse,
@@ -26,7 +26,7 @@ from ..aes import (
 )
 
 
-class CrunchyrollIE(InfoExtractor):
+class CrunchyrollIE(SubtitlesInfoExtractor):
     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
     _TEST = {
         'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
@@ -271,6 +271,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             else:
                 subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
 
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(video_id, subtitles)
+            return
+
         return {
             'id': video_id,
             'title': video_title,
```
youtube_dl/extractor/eitb.py

```diff
@@ -1,4 +1,6 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -7,20 +9,20 @@ from ..utils import ExtractorError
 
 
 class EitbIE(InfoExtractor):
-    IE_NAME = u'eitb.tv'
+    IE_NAME = 'eitb.tv'
     _VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
 
     _TEST = {
-        u'add_ie': ['Brightcove'],
-        u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
-        u'md5': u'edf4436247185adee3ea18ce64c47998',
-        u'info_dict': {
-            u'id': u'2743577154001',
-            u'ext': u'mp4',
-            u'title': u'60 minutos (Lasa y Zabala, 30 años)',
+        'add_ie': ['Brightcove'],
+        'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
+        'md5': 'edf4436247185adee3ea18ce64c47998',
+        'info_dict': {
+            'id': '2743577154001',
+            'ext': 'mp4',
+            'title': '60 minutos (Lasa y Zabala, 30 años)',
             # All videos from eitb has this description in the brightcove info
-            u'description': u'.',
-            u'uploader': u'Euskal Telebista',
+            'description': '.',
+            'uploader': 'Euskal Telebista',
         },
     }
 
@@ -30,7 +32,7 @@ class EitbIE(InfoExtractor):
         webpage = self._download_webpage(url, chapter_id)
         bc_url = BrightcoveIE._extract_brightcove_url(webpage)
         if bc_url is None:
-            raise ExtractorError(u'Could not extract the Brightcove url')
+            raise ExtractorError('Could not extract the Brightcove url')
         # The BrightcoveExperience object doesn't contain the video id, we set
         # it manually
         bc_url += '&%40videoPlayer={0}'.format(chapter_id)
```
youtube_dl/extractor/extremetube.py

```diff
@@ -7,6 +7,7 @@ from ..utils import (
     compat_urllib_parse_urlparse,
     compat_urllib_request,
     compat_urllib_parse,
+    str_to_int,
 )
 
 
@@ -20,6 +21,7 @@ class ExtremeTubeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Music Video 14 british euro brit european cumshots swallow',
             'uploader': 'unknown',
+            'view_count': int,
             'age_limit': 18,
         }
     }, {
@@ -39,8 +41,12 @@ class ExtremeTubeIE(InfoExtractor):
         video_title = self._html_search_regex(
             r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
         uploader = self._html_search_regex(
-            r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
-            fatal=False)
+            r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
+            webpage, 'uploader', fatal=False)
+        view_count = str_to_int(self._html_search_regex(
+            r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
+            webpage, 'view count', fatal=False))
 
         video_url = compat_urllib_parse.unquote(self._html_search_regex(
             r'video_url=(.+?)&', webpage, 'video_url'))
         path = compat_urllib_parse_urlparse(video_url).path
@@ -51,6 +57,7 @@ class ExtremeTubeIE(InfoExtractor):
             'id': video_id,
             'title': video_title,
             'uploader': uploader,
+            'view_count': view_count,
             'url': video_url,
             'format': format,
             'format_id': format,
```
youtube_dl/extractor/generic.py

```diff
@@ -382,14 +382,21 @@ class GenericIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*\.jpg$',
             },
         },
         # Wistia embed
         {
             'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
             'md5': '8788b683c777a5cf25621eaf286d0c23',
             'info_dict': {
                 'id': '1cfaf6b7ea',
                 'ext': 'mov',
                 'title': 'md5:51364a8d3d009997ba99656004b5e20d',
                 'duration': 643.0,
                 'filesize': 182808282,
                 'uploader': 'education-portal.com',
             },
         },
     ]
 
     def report_download_webpage(self, video_id):
         """Report webpage download."""
         if not self._downloader.params.get('test', False):
             self._downloader.report_warning('Falling back on generic information extractor.')
         super(GenericIE, self).report_download_webpage(video_id)
 
     def report_following_redirect(self, new_url):
         """Report information extraction."""
         self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
@@ -489,6 +496,7 @@ class GenericIE(InfoExtractor):
 
         url, smuggled_data = unsmuggle_url(url)
         force_videoid = None
+        is_intentional = smuggled_data and smuggled_data.get('to_generic')
         if smuggled_data and 'force_videoid' in smuggled_data:
             force_videoid = smuggled_data['force_videoid']
             video_id = force_videoid
@@ -531,6 +539,9 @@ class GenericIE(InfoExtractor):
                 'upload_date': upload_date,
             }
 
+        if not self._downloader.params.get('test', False) and not is_intentional:
+            self._downloader.report_warning('Falling back on generic information extractor.')
+
         try:
             webpage = self._download_webpage(url, video_id)
         except ValueError:
@@ -631,7 +642,7 @@ class GenericIE(InfoExtractor):
             )
             (["\'])
             (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
-            (?:embed|v)/.+?)
+            (?:embed|v|p)/.+?)
             \1''', webpage)
         if matches:
             return _playlist_from_matches(
@@ -656,6 +667,16 @@ class GenericIE(InfoExtractor):
                 'title': video_title,
                 'id': video_id,
             }
+        match = re.search(r'(?:id=["\']wistia_|data-wistiaid=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
+        if match:
+            return {
+                '_type': 'url_transparent',
+                'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
+                'ie_key': 'Wistia',
+                'uploader': video_uploader,
+                'title': video_title,
+                'id': match.group('id')
+            }
 
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
```
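The new Wistia branch returns a `url_transparent` result: extraction is delegated to `WistiaIE` via `ie_key`, while fields scraped from the embedding page (`uploader`, `title`) are overlaid on whatever that extractor returns. The shape of such a result, with a made-up video id:

```python
# Dict literal only -- the keys are the ones used in the hunk above.
result = {
    '_type': 'url_transparent',          # re-run extraction on 'url' ...
    'url': 'http://fast.wistia.net/embed/iframe/abc123',
    'ie_key': 'Wistia',                  # ... using this extractor ...
    'uploader': 'education-portal.com',  # ... then overlay these fields
    'title': 'Some embedded video',
    'id': 'abc123',
}
```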
youtube_dl/extractor/golem.py (new file, 71 lines)

```diff
@@ -0,0 +1,71 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urlparse,
+    determine_ext,
+)
+
+
+class GolemIE(InfoExtractor):
+    _VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
+    _TEST = {
+        'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
+        'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
+        'info_dict': {
+            'id': '14095',
+            'format_id': 'high',
+            'ext': 'mp4',
+            'title': 'iPhone 6 und 6 Plus - Test',
+            'duration': 300.44,
+            'filesize': 65309548,
+        }
+    }
+
+    _PREFIX = 'http://video.golem.de'
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        config = self._download_xml(
+            'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
+
+        info = {
+            'id': video_id,
+            'title': config.findtext('./title', 'golem'),
+            'duration': self._float(config.findtext('./playtime'), 'duration'),
+        }
+
+        formats = []
+        for e in config.findall('./*[url]'):
+            url = e.findtext('./url')
+            if not url:
+                self._downloader.report_warning(
+                    "{0}: url: empty, skipping".format(e.tag))
+                continue
+
+            formats.append({
+                'format_id': e.tag,
+                'url': compat_urlparse.urljoin(self._PREFIX, url),
+                'height': self._int(e.get('height'), 'height'),
+                'width': self._int(e.get('width'), 'width'),
+                'filesize': self._int(e.findtext('filesize'), 'filesize'),
+                'ext': determine_ext(e.findtext('./filename')),
+            })
+        self._sort_formats(formats)
+        info['formats'] = formats
+
+        thumbnails = []
+        for e in config.findall('.//teaser[url]'):
+            url = e.findtext('./url')
+            if not url:
+                continue
+            thumbnails.append({
+                'url': compat_urlparse.urljoin(self._PREFIX, url),
+                'width': self._int(e.get('width'), 'thumbnail width'),
+                'height': self._int(e.get('height'), 'thumbnail height'),
+            })
+        info['thumbnails'] = thumbnails
+
+        return info
```
youtube_dl/extractor/heise.py (new file, 81 lines)

```diff
@@ -0,0 +1,81 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    get_meta_content,
+    parse_iso8601,
+)
+
+
+class HeiseIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+        https?://(?:www\.)?heise\.de/video/artikel/
+        .+?(?P<id>[0-9]+)\.html(?:$|[?#])
+    '''
+    _TEST = {
+        'url': (
+            'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
+        ),
+        'md5': 'ffed432483e922e88545ad9f2f15d30e',
+        'info_dict': {
+            'id': '2404147',
+            'ext': 'mp4',
+            'title': (
+                "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
+            ),
+            'format_id': 'mp4_720',
+            'timestamp': 1411812600,
+            'upload_date': '20140927',
+            'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+        json_url = self._search_regex(
+            r'json_url:\s*"([^"]+)"', webpage, 'json URL')
+        config = self._download_json(json_url, video_id)
+
+        info = {
+            'id': video_id,
+            'thumbnail': config.get('poster'),
+            'timestamp': parse_iso8601(get_meta_content('date', webpage)),
+            'description': self._og_search_description(webpage),
+        }
+
+        title = get_meta_content('fulltitle', webpage)
+        if title:
+            info['title'] = title
+        elif config.get('title'):
+            info['title'] = config['title']
+        else:
+            info['title'] = self._og_search_title(webpage)
+
+        formats = []
+        for t, rs in config['formats'].items():
+            if not rs or not hasattr(rs, 'items'):
+                self._downloader.report_warning(
+                    'formats: {0}: no resolutions'.format(t))
+                continue
+
+            for height_str, obj in rs.items():
+                format_id = '{0}_{1}'.format(t, height_str)
+
+                if not obj or not obj.get('url'):
+                    self._downloader.report_warning(
+                        'formats: {0}: no url'.format(format_id))
+                    continue
+
+                formats.append({
+                    'url': obj['url'],
+                    'format_id': format_id,
+                    'height': self._int(height_str, 'height'),
+                })
+
+        self._sort_formats(formats)
+        info['formats'] = formats
+
+        return info
```
youtube_dl/extractor/muenchentv.py

```diff
@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import datetime
 import json
 
 from .common import InfoExtractor
@@ -23,6 +22,7 @@ class MuenchenTVIE(InfoExtractor):
             'ext': 'mp4',
             'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'is_live': True,
             'thumbnail': 're:^https?://.*\.jpg$'
         },
         'params': {
             'skip_download': True,
@@ -33,9 +33,7 @@ class MuenchenTVIE(InfoExtractor):
         display_id = 'live'
         webpage = self._download_webpage(url, display_id)
 
-        now = datetime.datetime.now()
-        now_str = now.strftime("%Y-%m-%d %H:%M")
-        title = self._og_search_title(webpage) + ' ' + now_str
+        title = self._live_title(self._og_search_title(webpage))
 
         data_js = self._search_regex(
             r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
@@ -73,5 +71,6 @@ class MuenchenTVIE(InfoExtractor):
             'title': title,
             'formats': formats,
+            'is_live': True,
             'thumbnail': thumbnail,
         }
```
youtube_dl/extractor/nfl.py

```diff
@@ -6,6 +6,7 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    compat_urllib_parse,
     int_or_none,
     remove_end,
 )
@@ -13,76 +14,116 @@ from ..utils import (
 
 class NFLIE(InfoExtractor):
     IE_NAME = 'nfl.com'
-    _VALID_URL = r'(?x)https?://(?:www\.)?nfl\.com/(?:videos/(?:.+)/|.*?\#video=)(?P<id>\d..[0-9]+)'
-    _PLAYER_CONFIG_URL = 'http://www.nfl.com/static/content/static/config/video/config.json'
-    _TEST = {
-        'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
-        # 'md5': '5eb8c40a727dda106d510e5d6ffa79e5',  # md5 checksum fluctuates
-        'info_dict': {
-            'id': '0ap3000000398478',
-            'ext': 'mp4',
-            'title': 'Week 3: Washington Redskins vs. Philadelphia Eagles highlights',
-            'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
-            'upload_date': '20140921',
-            'timestamp': 1411337580,
-            'thumbnail': 're:^https?://.*\.jpg$',
+    _VALID_URL = r'''(?x)https?://
+        (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
+        (?:.+?/)*
+        (?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
+    _TESTS = [
+        {
+            'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
+            'md5': '394ef771ddcd1354f665b471d78ec4c6',
+            'info_dict': {
+                'id': '0ap3000000398478',
+                'ext': 'mp4',
+                'title': 'Week 3: Redskins vs. Eagles highlights',
+                'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
+                'upload_date': '20140921',
+                'timestamp': 1411337580,
+                'thumbnail': 're:^https?://.*\.jpg$',
+            }
+        },
+        {
+            'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
+            'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
+            'info_dict': {
+                'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
+                'ext': 'mp4',
+                'title': 'LIVE: Post Game vs. Browns',
+                'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
+                'upload_date': '20131229',
+                'timestamp': 1388354455,
+                'thumbnail': 're:^https?://.*\.jpg$',
+            }
         }
-    }
+    ]
+
+    @staticmethod
+    def prepend_host(host, url):
+        if not url.startswith('http'):
+            if not url.startswith('/'):
+                url = '/%s' % url
+            url = 'http://{0:}{1:}'.format(host, url)
+        return url
+
+    @staticmethod
+    def format_from_stream(stream, protocol, host, path_prefix='',
+                           preference=0, note=None):
+        url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
+            protocol=protocol,
+            host=host,
+            prefix=path_prefix,
+            path=stream.get('path'),
+        )
+        return {
+            'url': url,
+            'vbr': int_or_none(stream.get('rate', 0), 1000),
+            'preference': preference,
+            'format_note': note,
+        }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id, host = mobj.group('id'), mobj.group('host')
 
-        config = self._download_json(self._PLAYER_CONFIG_URL, video_id,
+        webpage = self._download_webpage(url, video_id)
+
+        config_url = NFLIE.prepend_host(host, self._search_regex(
+            r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL'))
+        config = self._download_json(config_url, video_id,
                                      note='Downloading player config')
-        url_template = 'http://nfl.com{contentURLTemplate:s}'.format(**config)
-        video_data = self._download_json(url_template.format(id=video_id), video_id)
-
-        cdns = config.get('cdns')
-        if not cdns:
-            raise ExtractorError('Failed to get CDN data', expected=True)
+        url_template = NFLIE.prepend_host(
+            host, '{contentURLTemplate:}'.format(**config))
+        video_data = self._download_json(
+            url_template.format(id=video_id), video_id)
 
         formats = []
-        streams = video_data.get('cdnData', {}).get('bitrateInfo', [])
-        for name, cdn in cdns.items():
-            # LimeLight streams don't seem to work
-            if cdn.get('name') == 'LIMELIGHT':
-                continue
-
-            protocol = cdn.get('protocol')
-            host = remove_end(cdn.get('host', ''), '/')
-            if not (protocol and host):
-                continue
-
-            path_prefix = cdn.get('pathprefix', '')
-            if path_prefix and not path_prefix.endswith('/'):
-                path_prefix = '%s/' % path_prefix
-
-            get_url = lambda p: '{protocol:s}://{host:s}/{prefix:s}{path:}'.format(
-                protocol=protocol,
-                host=host,
-                prefix=path_prefix,
-                path=p,
-            )
-
-            if protocol == 'rtmp':
-                preference = -2
-            elif 'prog' in name.lower():
-                preference = -1
-            else:
-                preference = 0
-
-            for stream in streams:
-                path = stream.get('path')
-                if not path:
-                    continue
-
-                formats.append({
-                    'url': get_url(path),
-                    'vbr': int_or_none(stream.get('rate', 0), 1000),
-                    'preference': preference,
-                    'format_note': name,
-                })
+        cdn_data = video_data.get('cdnData', {})
+        streams = cdn_data.get('bitrateInfo', [])
+        if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
+            parts = compat_urllib_parse.urlparse(cdn_data.get('uri'))
+            protocol, host = parts.scheme, parts.netloc
+            for stream in streams:
+                formats.append(
+                    NFLIE.format_from_stream(stream, protocol, host))
+        else:
+            cdns = config.get('cdns')
+            if not cdns:
+                raise ExtractorError('Failed to get CDN data', expected=True)
+
+            for name, cdn in cdns.items():
+                # LimeLight streams don't seem to work
+                if cdn.get('name') == 'LIMELIGHT':
+                    continue
+
+                protocol = cdn.get('protocol')
+                host = remove_end(cdn.get('host', ''), '/')
+                if not (protocol and host):
+                    continue
+
+                prefix = cdn.get('pathprefix', '')
+                if prefix and not prefix.endswith('/'):
+                    prefix = '%s/' % prefix
+
+                preference = 0
+                if protocol == 'rtmp':
+                    preference = -2
+                elif 'prog' in name.lower():
+                    preference = 1
+
+                for stream in streams:
+                    formats.append(
+                        NFLIE.format_from_stream(stream, protocol, host,
+                                                 prefix, preference, name))
 
         self._sort_formats(formats)
 
@@ -94,7 +135,7 @@ class NFLIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'title': video_data.get('storyHeadline'),
+            'title': video_data.get('headline'),
             'formats': formats,
             'description': video_data.get('caption'),
             'duration': video_data.get('duration'),
```
youtube_dl/extractor/oktoberfesttv.py (new file, 47 lines)

```diff
@@ -0,0 +1,47 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class OktoberfestTVIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
+
+    _TEST = {
+        'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
+        'info_dict': {
+            'id': 'hb-zelt',
+            'ext': 'mp4',
+            'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._live_title(self._html_search_regex(
+            r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
+
+        clip = self._search_regex(
+            r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
+        ncurl = self._search_regex(
+            r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
+        video_url = ncurl + clip
+        thumbnail = self._search_regex(
+            r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
+            'thumbnail', fatal=False)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'ext': 'mp4',
+            'is_live': True,
+            'thumbnail': thumbnail,
+        }
```
youtube_dl/extractor/played.py (new file, 55 lines)

```diff
@@ -0,0 +1,55 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import os.path
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+)
+
+
+class PlayedIE(InfoExtractor):
+    IE_NAME = 'played.to'
+    _VALID_URL = r'https?://(?:www\.)?played\.to/(?P<id>[a-zA-Z0-9_-]+)'
+
+    _TEST = {
+        'url': 'http://played.to/j2f2sfiiukgt',
+        'md5': 'c2bd75a368e82980e7257bf500c00637',
+        'info_dict': {
+            'id': 'j2f2sfiiukgt',
+            'ext': 'flv',
+            'title': 'youtube-dl_test_video.mp4',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        orig_webpage = self._download_webpage(url, video_id)
+        fields = re.findall(
+            r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
+        data = dict(fields)
+
+        self._sleep(2, video_id)
+
+        post = compat_urllib_parse.urlencode(data)
+        headers = {
+            b'Content-Type': b'application/x-www-form-urlencoded',
+        }
+        req = compat_urllib_request.Request(url, post, headers)
+        webpage = self._download_webpage(
+            req, video_id, note='Downloading video page ...')
+
+        title = os.path.splitext(data['fname'])[0]
+
+        video_url = self._search_regex(
+            r'file: "?(.+?)",', webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+        }
```
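played.to serves a countdown page whose hidden form fields must be posted back to the same URL after a short pause; the extractor collects them with a regex, calls `_sleep(2, ...)`, then re-requests the page as a POST. A standalone sketch of that form round-trip (stdlib only; the field names depend on the site):

```python
import re
import time
import urllib.parse
import urllib.request

def fetch_after_countdown(url, html, delay=2):
    # Collect <input type="hidden" name=... value=...> pairs from the first page.
    fields = dict(re.findall(
        r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', html))
    time.sleep(delay)  # mirror of the extractor's _sleep(2, video_id) pause
    post = urllib.parse.urlencode(fields).encode('ascii')
    req = urllib.request.Request(url, post, {
        'Content-Type': 'application/x-www-form-urlencoded',
    })
    return urllib.request.urlopen(req).read()
```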
youtube_dl/extractor/sport5.py (new file, 92 lines)

```diff
@@ -0,0 +1,92 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class Sport5IE(InfoExtractor):
+    _VALID_URL = r'http://(?:www|vod)?\.sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1',
+            'info_dict': {
+                'id': 's5-Y59xx1-GUh2',
+                'ext': 'mp4',
+                'title': 'ולנסיה-קורדובה 0:3',
+                'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה',
+                'duration': 228,
+                'categories': list,
+            },
+            'skip': 'Blocked outside of Israel',
+        }, {
+            'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE',
+            'info_dict': {
+                'id': 's5-SiXxx1-hKh2',
+                'ext': 'mp4',
+                'title': 'GOALS_CELTIC_270914.mp4',
+                'description': '',
+                'duration': 87,
+                'categories': list,
+            },
+            'skip': 'Blocked outside of Israel',
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        media_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, media_id)
+
+        video_id = self._html_search_regex('clipId=([\w-]+)', webpage, 'video id')
+
+        metadata = self._download_xml(
+            'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id,
+            video_id)
+
+        error = metadata.find('./Error')
+        if error is not None:
+            raise ExtractorError(
+                '%s returned error: %s - %s' % (
+                    self.IE_NAME,
+                    error.find('./Name').text,
+                    error.find('./Description').text),
+                expected=True)
+
+        title = metadata.find('./Title').text
+        description = metadata.find('./Description').text
+        duration = int(metadata.find('./Duration').text)
+
+        posters_el = metadata.find('./PosterLinks')
+        thumbnails = [{
+            'url': thumbnail.text,
+            'width': int(thumbnail.get('width')),
+            'height': int(thumbnail.get('height')),
+        } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else []
+
+        categories_el = metadata.find('./Categories')
+        categories = [
+            cat.get('name') for cat in categories_el.findall('./Category')
+        ] if categories_el is not None else []
+
+        formats = [{
+            'url': fmt.text,
+            'ext': 'mp4',
+            'vbr': int(fmt.get('bitrate')),
+            'width': int(fmt.get('width')),
+            'height': int(fmt.get('height')),
+        } for fmt in metadata.findall('./PlaybackLinks/FileURL')]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnails': thumbnails,
+            'duration': duration,
+            'categories': categories,
+            'formats': formats,
+        }
```
youtube_dl/extractor/thvideo.py

```diff
@@ -26,8 +26,7 @@ class THVideoIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         # extract download link from mobile player page
         webpage_player = self._download_webpage(
@@ -57,3 +56,29 @@ class THVideoIE(InfoExtractor):
             'description': description,
             'upload_date': upload_date
         }
+
+
+class THVideoPlaylistIE(InfoExtractor):
+    _VALID_URL = r'http?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://thvideo.tv/mylist2',
+        'info_dict': {
+            'id': '2',
+            'title': '幻想万華鏡',
+        },
+        'playlist_mincount': 23,
+    }
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+        list_title = self._html_search_regex(
+            r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
+            fatal=False)
+
+        entries = [
+            self.url_result('http://thvideo.tv/v/th' + id, 'THVideo')
+            for id in re.findall(r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]
+
+        return self.playlist_result(entries, playlist_id, list_title)
```
youtube_dl/extractor/vbox7.py

```diff
@@ -19,7 +19,7 @@ class Vbox7IE(InfoExtractor):
         'md5': '99f65c0c9ef9b682b97313e052734c3f',
         'info_dict': {
             'id': '249bb972c2',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Смях! Чудо - чист за секунди - Скрита камера',
         },
     }
@@ -50,7 +50,6 @@ class Vbox7IE(InfoExtractor):
         return {
             'id': video_id,
             'url': final_url,
-            'ext': 'flv',
             'title': title,
             'thumbnail': thumbnail_url,
         }
```
youtube_dl/extractor/vevo.py

```diff
@@ -5,7 +5,6 @@ import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..utils import (
     compat_HTTPError,
     compat_urllib_request,
     ExtractorError,
 )
@@ -25,7 +24,7 @@ class VevoIE(InfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
-        "md5": "06bea460acb744eab74a9d7dcb4bfd61",
+        "md5": "95ee28ee45e70130e3ab02b0f579ae23",
         'info_dict': {
             'id': 'GB1101300280',
             'ext': 'mp4',
@@ -41,7 +40,7 @@ class VevoIE(InfoExtractor):
     }, {
         'note': 'v3 SMIL format',
         'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
-        'md5': '893ec0e0d4426a1d96c01de8f2bdff58',
+        'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
         'info_dict': {
             'id': 'USUV71302923',
             'ext': 'mp4',
```
youtube_dl/extractor/vimeo.py

```diff
@@ -8,17 +8,19 @@ import itertools
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
+    clean_html,
     compat_HTTPError,
     compat_urllib_parse,
     compat_urllib_request,
-    clean_html,
-    get_element_by_attribute,
+    compat_urlparse,
     ExtractorError,
+    get_element_by_attribute,
+    InAdvancePagedList,
+    int_or_none,
     RegexNotFoundError,
     std_headers,
     unsmuggle_url,
     urlencode_postdata,
-    int_or_none,
 )
 
 
@@ -529,3 +531,58 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
 
     def _real_extract(self, url):
         return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
+
+
+class VimeoLikesIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
+    IE_NAME = 'vimeo:likes'
+    IE_DESC = 'Vimeo user likes'
+    _TEST = {
+        'url': 'https://vimeo.com/user755559/likes/',
+        'playlist_mincount': 293,
+        "info_dict": {
+            "description": "See all the videos urza likes",
+            "title": 'Videos urza likes',
+        },
+    }
+
+    def _real_extract(self, url):
+        user_id = self._match_id(url)
+        webpage = self._download_webpage(url, user_id)
+        page_count = self._int(
+            self._search_regex(
+                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
+                    .*?</a></li>\s*<li\s+class="pagination_next">
+                ''', webpage, 'page count'),
+            'page count', fatal=True)
+        PAGE_SIZE = 12
+        title = self._html_search_regex(
+            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
+        description = self._html_search_meta('description', webpage)
+
+        def _get_page(idx):
+            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
+                self.http_scheme(), user_id, idx + 1)
+            webpage = self._download_webpage(
+                page_url, user_id,
+                note='Downloading page %d/%d' % (idx + 1, page_count))
+            video_list = self._search_regex(
+                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
+                webpage, 'video content')
+            paths = re.findall(
+                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
+            for path in paths:
+                yield {
+                    '_type': 'url',
+                    'url': compat_urlparse.urljoin(page_url, path),
+                }
+
+        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
+
+        return {
+            '_type': 'playlist',
+            'id': 'user%s_likes' % user_id,
+            'title': title,
+            'description': description,
+            'entries': pl,
+        }
```
youtube_dl/extractor/vube.py

```diff
@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     compat_str,
+    ExtractorError,
 )
 
 
@@ -16,6 +17,24 @@ class VubeIE(InfoExtractor):
 
     _TESTS = [
         {
+            'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
+            'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
+            'info_dict': {
+                'id': 'Y8NUZ69Tf7',
+                'ext': 'mp4',
+                'title': 'Best Drummer Ever [HD]',
+                'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
+                'thumbnail': 're:^https?://.*\.jpg',
+                'uploader': 'William',
+                'timestamp': 1406876915,
+                'upload_date': '20140801',
+                'duration': 258.051,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
+                'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
+            },
+        }, {
             'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
             'md5': 'db7aba89d4603dadd627e9d1973946fe',
             'info_dict': {
@@ -32,7 +51,8 @@ class VubeIE(InfoExtractor):
                 'dislike_count': int,
                 'comment_count': int,
                 'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
-            }
+            },
+            'skip': 'Removed due to DMCA',
         },
         {
             'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
@@ -51,7 +71,8 @@ class VubeIE(InfoExtractor):
                 'dislike_count': int,
                 'comment_count': int,
                 'categories': ['seraina', 'jessica', 'krewella', 'alive'],
-            }
+            },
+            'skip': 'Removed due to DMCA',
         }, {
             'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
             'md5': '0584fc13b50f887127d9d1007589d27f',
@@ -69,7 +90,8 @@ class VubeIE(InfoExtractor):
                 'dislike_count': int,
                 'comment_count': int,
                 'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
-            }
+            },
+            'skip': 'Removed due to DMCA',
         }
     ]
 
@@ -102,6 +124,11 @@ class VubeIE(InfoExtractor):
 
         self._sort_formats(formats)
 
+        if not formats and video.get('vst') == 'dmca':
+            raise ExtractorError(
+                'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
+                expected=True)
+
         title = video['title']
         description = video.get('description')
         thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
```
youtube_dl/extractor/wat.py

```diff
@@ -40,6 +40,7 @@ class WatIE(InfoExtractor):
             'upload_date': '20140816',
             'duration': 2910,
         },
+        'skip': "Ce contenu n'est pas disponible pour l'instant.",
     },
 ]
```
youtube_dl/extractor/wistia.py

```diff
@@ -1,13 +1,14 @@
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
+from ..utils import ExtractorError, compat_urllib_request
 
 
 class WistiaIE(InfoExtractor):
     _VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
+    _API_URL = 'http://fast.wistia.com/embed/medias/{0:}.json'
 
     _TEST = {
         'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
@@ -24,11 +25,13 @@ class WistiaIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        webpage = self._download_webpage(url, video_id)
-        data_json = self._html_search_regex(
-            r'Wistia\.iframeInit\((.*?), {}\);', webpage, 'video data')
-
-        data = json.loads(data_json)
+        request = compat_urllib_request.Request(self._API_URL.format(video_id))
+        request.add_header('Referer', url)  # Some videos require this.
+        data_json = self._download_json(request, video_id)
+        if data_json.get('error'):
+            raise ExtractorError('Error while getting the playlist',
+                                 expected=True)
+        data = data_json['media']
 
         formats = []
         thumbnails = []
```
youtube_dl/extractor/ynet.py (new file, 54 lines)

```diff
@@ -0,0 +1,54 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import compat_urllib_parse
+
+
+class YnetIE(InfoExtractor):
+    _VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html'
+    _TESTS = [
+        {
+            'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
+            'md5': '002b44ee2f33d50363a1c153bed524cf',
+            'info_dict': {
+                'id': 'L-11659-99244',
+                'ext': 'flv',
+                'title': 'איש לא יודע מאיפה באנו',
+                'thumbnail': 're:^https?://.*\.jpg',
+            }
+        }, {
+            'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
+            'md5': '6455046ae1b48cf7e2b7cae285e53a16',
+            'info_dict': {
+                'id': 'L-8859-84418',
+                'ext': 'flv',
+                'title': "צפו: הנשיקה הלוהטת של תורגי' ויוליה פלוטקין",
+                'thumbnail': 're:^https?://.*\.jpg',
+            }
+        }
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        content = compat_urllib_parse.unquote_plus(self._og_search_video_url(webpage))
+        config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
+        f4m_url = config['clip']['url']
+        title = self._og_search_title(webpage)
+        m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title)
+        if m:
+            title = m.group('title')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': self._extract_f4m_formats(f4m_url, video_id),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
```
youtube_dl/extractor/youku.py

```diff
@@ -1,6 +1,7 @@
 # coding: utf-8
+from __future__ import unicode_literals
 
-import json
 import math
 import random
 import re
@@ -13,18 +14,25 @@ from ..utils import (
 
 
 class YoukuIE(InfoExtractor):
-    _VALID_URL = r'(?:(?:http://)?(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|youku:)(?P<ID>[A-Za-z0-9]+)(?:\.html|/v\.swf|)'
-    _TEST = {
-        u"url": u"http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
-        u"file": u"XNDgyMDQ2NTQw_part00.flv",
-        u"md5": u"ffe3f2e435663dc2d1eea34faeff5b5b",
-        u"params": {u"test": False},
-        u"info_dict": {
-            u"title": u"youtube-dl test video \"'/\\ä↭𝕐"
-        }
-    }
+    _VALID_URL = r'''(?x)
+        (?:
+            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
+            youku:)
+        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
+    '''
+    _TEST = {
+        'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
+        'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
+        'params': {
+            'test': False
+        },
+        'info_dict': {
+            'id': 'XNDgyMDQ2NTQw_part00',
+            'ext': 'flv',
+            'title': 'youtube-dl test video "\'/\\ä↭𝕐'
+        }
+    }
 
     def _gen_sid(self):
         nowTime = int(time.time() * 1000)
         random1 = random.randint(1000,1998)
@@ -55,49 +63,42 @@ class YoukuIE(InfoExtractor):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('ID')
+        video_id = mobj.group('id')
 
         info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
 
-        jsondata = self._download_webpage(info_url, video_id)
+        config = self._download_json(info_url, video_id)
 
-        self.report_extraction(video_id)
-        try:
-            config = json.loads(jsondata)
-            error_code = config['data'][0].get('error_code')
-            if error_code:
-                # -8 means blocked outside China.
-                error = config['data'][0].get('error')  # Chinese and English, separated by newline.
-                raise ExtractorError(error or u'Server reported error %i' % error_code,
-                                     expected=True)
+        error_code = config['data'][0].get('error_code')
+        if error_code:
+            # -8 means blocked outside China.
+            error = config['data'][0].get('error')  # Chinese and English, separated by newline.
+            raise ExtractorError(error or 'Server reported error %i' % error_code,
+                                 expected=True)
 
-            video_title = config['data'][0]['title']
-            seed = config['data'][0]['seed']
+        video_title = config['data'][0]['title']
+        seed = config['data'][0]['seed']
 
-            format = self._downloader.params.get('format', None)
-            supported_format = list(config['data'][0]['streamfileids'].keys())
+        format = self._downloader.params.get('format', None)
+        supported_format = list(config['data'][0]['streamfileids'].keys())
 
-            if format is None or format == 'best':
-                if 'hd2' in supported_format:
-                    format = 'hd2'
-                else:
-                    format = 'flv'
-                ext = u'flv'
-            elif format == 'worst':
-                format = 'mp4'
-                ext = u'mp4'
-            else:
-                format = 'flv'
-                ext = u'flv'
+        # TODO proper format selection
+        if format is None or format == 'best':
+            if 'hd2' in supported_format:
+                format = 'hd2'
+            else:
+                format = 'flv'
+            ext = 'flv'
+        elif format == 'worst':
+            format = 'mp4'
+            ext = 'mp4'
+        else:
+            format = 'flv'
+            ext = 'flv'
 
-            fileid = config['data'][0]['streamfileids'][format]
-            keys = [s['k'] for s in config['data'][0]['segs'][format]]
-            # segs is usually a dictionary, but an empty *list* if an error occured.
-        except (UnicodeDecodeError, ValueError, KeyError):
-            raise ExtractorError(u'Unable to extract info section')
+        fileid = config['data'][0]['streamfileids'][format]
+        keys = [s['k'] for s in config['data'][0]['segs'][format]]
+        # segs is usually a dictionary, but an empty *list* if an error occured.
 
         files_info=[]
         sid = self._gen_sid()
@@ -106,9 +107,8 @@ class YoukuIE(InfoExtractor):
         #column 8,9 of fileid represent the segment number
         #fileid[7:9] should be changed
         for index, key in enumerate(keys):
-
             temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
-            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+            download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
 
             info = {
                 'id': '%s_part%02d' % (video_id, index),
```
youtube_dl/extractor/youtube.py

```diff
@@ -26,7 +26,7 @@ from ..utils import (
     get_element_by_attribute,
     ExtractorError,
     int_or_none,
-    PagedList,
+    OnDemandPagedList,
     unescapeHTML,
     unified_strdate,
     orderedSet,
@@ -655,7 +655,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
         # Get video webpage
         url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
-        video_webpage = self._download_webpage(url, video_id)
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'PREF=hl=en')
+        video_webpage = self._download_webpage(req, video_id)
 
         # Attempt to extract SWF player URL
         mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
@@ -1068,6 +1070,13 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'title': 'JODA15',
         }
+    }, {
+        'note': 'Embedded SWF player',
+        'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
+        'playlist_count': 4,
+        'info_dict': {
+            'title': 'JODA7',
+        }
     }]
 
     def _real_initialize(self):
@@ -1334,7 +1343,7 @@ class YoutubeUserIE(InfoExtractor):
             'id': video_id,
             'title': title,
         }
-        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)
+        url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
 
         return self.playlist_result(url_results, playlist_title=username)
```
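Sending a `PREF=hl=en` cookie asks YouTube for English-language pages regardless of geo-detected locale, which keeps the watch-page regexes stable. A standalone sketch of attaching that cookie to a single request (Python 3 stdlib; the video URL is just an example, and the network call is left commented out):

```python
import urllib.request

req = urllib.request.Request('https://www.youtube.com/watch?v=BaW_jenozKc')
req.add_header('Cookie', 'PREF=hl=en')  # request the English UI
# html = urllib.request.urlopen(req).read()
```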
youtube_dl/options.py

```diff
@@ -87,7 +87,7 @@ def parseOpts(overrideArguments=None):
     for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
         try:
             i = opts.index(private_opt)
-            opts[i+1] = '<PRIVATE>'
+            opts[i+1] = 'PRIVATE'
         except ValueError:
             pass
     return opts
```
youtube_dl/utils.py

```diff
@@ -1384,14 +1384,16 @@ def check_executable(exe, args=[]):
 
 
 class PagedList(object):
-    def __init__(self, pagefunc, pagesize):
-        self._pagefunc = pagefunc
-        self._pagesize = pagesize
-
     def __len__(self):
         # This is only useful for tests
         return len(self.getslice())
 
+
+class OnDemandPagedList(PagedList):
+    def __init__(self, pagefunc, pagesize):
+        self._pagefunc = pagefunc
+        self._pagesize = pagesize
+
     def getslice(self, start=0, end=None):
         res = []
         for pagenum in itertools.count(start // self._pagesize):
@@ -1430,6 +1432,35 @@ class PagedList(object):
         return res
 
 
+class InAdvancePagedList(PagedList):
+    def __init__(self, pagefunc, pagecount, pagesize):
+        self._pagefunc = pagefunc
+        self._pagecount = pagecount
+        self._pagesize = pagesize
+
+    def getslice(self, start=0, end=None):
+        res = []
+        start_page = start // self._pagesize
+        end_page = (
+            self._pagecount if end is None else (end // self._pagesize + 1))
+        skip_elems = start - start_page * self._pagesize
+        only_more = None if end is None else end - start
+        for pagenum in range(start_page, end_page):
+            page = list(self._pagefunc(pagenum))
+            if skip_elems:
+                page = page[skip_elems:]
+                skip_elems = None
+            if only_more is not None:
+                if len(page) < only_more:
+                    only_more -= len(page)
+                else:
+                    page = page[:only_more]
+                    res.extend(page)
+                    break
+            res.extend(page)
+        return res
+
+
 def uppercase_escape(s):
     unicode_escape = codecs.getdecoder('unicode_escape')
     return re.sub(
```
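The split gives two pagers with the same `getslice` interface but different fetch strategies: `OnDemandPagedList` keeps requesting pages until one comes back short, while `InAdvancePagedList` is told the page count up front (VimeoLikesIE above reads it from the pager widget) and visits only the pages that overlap the requested slice. A standalone restatement of the in-advance slice logic with toy data (the helper name and the ten-item data set are illustrative):

```python
def getslice_in_advance(pagefunc, pagecount, pagesize, start=0, end=None):
    """Mirror of InAdvancePagedList.getslice from the hunk above."""
    res = []
    start_page = start // pagesize
    end_page = pagecount if end is None else (end // pagesize + 1)
    skip_elems = start - start_page * pagesize        # offset into first page
    only_more = None if end is None else end - start  # items still wanted
    for pagenum in range(start_page, end_page):
        page = list(pagefunc(pagenum))
        if skip_elems:
            page = page[skip_elems:]
            skip_elems = None
        if only_more is not None:
            if len(page) < only_more:
                only_more -= len(page)
            else:
                res.extend(page[:only_more])
                break
        res.extend(page)
    return res

def get_page(pagenum):  # toy data: items 0..9, three per page
    first = pagenum * 3
    return range(first, min(first + 3, 10))

assert getslice_in_advance(get_page, 4, 3, 4, 7) == [4, 5, 6]
```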
youtube_dl/version.py

```diff
@@ -1,2 +1,2 @@
 
-__version__ = '2014.09.24.1'
+__version__ = '2014.09.29'
```