Compare commits

...

7 Commits

Author SHA1 Message Date
Philipp Hagemeister
d2b194607c release 2014.04.02 2014-04-02 14:26:34 +02:00
Jaime Marquínez Ferrándiz
f6177462db [youtube] feeds: Also look for the html in the 'content_html' field (fixes #2671) 2014-04-02 14:13:08 +02:00
Jaime Marquínez Ferrándiz
9ddaf4ef8c [comedycentral] Change XPath .//guid to ./guid (fixes #2668)
It fails to find the element in python 2.6 and it's not required, the
element is a direct child of the item node.
2014-04-01 21:38:07 +02:00
Jaime Marquínez Ferrándiz
97b5573848 [comedycentral] Update test title for 34cbc7ee8d 2014-04-01 21:29:40 +02:00
Jaime Marquínez Ferrándiz
18c95c1ab0 [rutube] Use _download_json 2014-04-01 20:30:22 +02:00
Sergey M․
0479c625a4 [brightcove] Encode object_str with utf-8 2014-04-01 20:17:35 +07:00
Sergey M․
f659951e22 [vk] Support optional dash for oid in embedded links 2014-04-01 19:38:42 +07:00
7 changed files with 25 additions and 20 deletions

youtube_dl/extractor/brightcove.py

@@ -87,7 +87,7 @@ class BrightcoveIE(InfoExtractor):
         object_str = object_str.replace('<--', '<!--')
         object_str = fix_xml_ampersands(object_str)
-        object_doc = xml.etree.ElementTree.fromstring(object_str)
+        object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
         fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
         if fv_el is not None:
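
A minimal sketch of why the encode matters (the markup below is made up, not taken from a Brightcove page): on Python 2, handing the expat parser a unicode string containing non-ASCII characters can raise UnicodeEncodeError, while UTF-8 bytes parse fine on both Python 2 and 3.

# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET

# Hypothetical object markup with a non-ASCII attribute value.
object_str = u'<object><param name="flashVars" value="videoId=1"/><param name="title" value="Чемпионат"/></object>'

# Parsing UTF-8 bytes instead of the unicode object sidesteps the
# Python 2 UnicodeEncodeError mentioned above.
object_doc = ET.fromstring(object_str.encode('utf-8'))
print(object_doc.find('./param').get('name'))  # flashVars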

youtube_dl/extractor/comedycentral.py

@@ -59,7 +59,7 @@ class ComedyCentralShowsIE(InfoExtractor):
             'upload_date': '20121213',
             'description': 'Kristen Stewart learns to let loose in "On the Road."',
             'uploader': 'thedailyshow',
-            'title': 'thedailyshow-kristen-stewart part 1',
+            'title': 'thedailyshow kristen-stewart part 1',
         }
     }
@@ -165,7 +165,7 @@ class ComedyCentralShowsIE(InfoExtractor):
             content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
             duration = float_or_none(content.attrib.get('duration'))
             mediagen_url = content.attrib['url']
-            guid = itemEl.find('.//guid').text.rpartition(':')[-1]
+            guid = itemEl.find('./guid').text.rpartition(':')[-1]
             cdoc = self._download_xml(
                 mediagen_url, epTitle,
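
As the commit message notes, guid is a direct child of the item node, so the relative './guid' path is sufficient and also behaves consistently on Python 2.6's older ElementTree. A minimal sketch with a made-up <item> fragment (the guid value is an assumption):

import xml.etree.ElementTree as ET

item = ET.fromstring(
    '<item>'
    '<guid isPermaLink="false">tag:comedycentral.com,2012:clip-12345</guid>'
    '<title>part 1</title>'
    '</item>')

# './guid' only looks at direct children, which is all that is needed here.
guid = item.find('./guid').text.rpartition(':')[-1]
print(guid)  # clip-12345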

youtube_dl/extractor/generic.py

@@ -82,6 +82,17 @@ class GenericIE(InfoExtractor):
             },
             'add_ie': ['Brightcove'],
         },
+        {
+            'url': 'http://www.championat.com/video/football/v/87/87499.html',
+            'md5': 'fb973ecf6e4a78a67453647444222983',
+            'info_dict': {
+                'id': '3414141473001',
+                'ext': 'mp4',
+                'title': 'Видео. Удаление Дзагоева (ЦСКА)',
+                'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
+                'uploader': 'Championat',
+            },
+        },
         # Direct link to a video
         {
             'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',

youtube_dl/extractor/rutube.py

@@ -2,7 +2,6 @@
 from __future__ import unicode_literals
 import re
-import json
 import itertools
 from .common import InfoExtractor
@@ -39,17 +38,15 @@ class RutubeIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        api_response = self._download_webpage(
+        video = self._download_json(
             'http://rutube.ru/api/video/%s/?format=json' % video_id,
             video_id, 'Downloading video JSON')
-        video = json.loads(api_response)
-        api_response = self._download_webpage(
+        trackinfo = self._download_json(
             'http://rutube.ru/api/play/trackinfo/%s/?format=json' % video_id,
             video_id, 'Downloading trackinfo JSON')
-        trackinfo = json.loads(api_response)
         # Some videos don't have the author field
         author = trackinfo.get('author') or {}
         m3u8_url = trackinfo['video_balancer'].get('m3u8')
@@ -82,10 +79,9 @@ class RutubeChannelIE(InfoExtractor):
     def _extract_videos(self, channel_id, channel_title=None):
         entries = []
         for pagenum in itertools.count(1):
-            api_response = self._download_webpage(
+            page = self._download_json(
                 self._PAGE_TEMPLATE % (channel_id, pagenum),
                 channel_id, 'Downloading page %s' % pagenum)
-            page = json.loads(api_response)
             results = page['results']
             if not results:
                 break
@@ -111,10 +107,9 @@ class RutubeMovieIE(RutubeChannelIE):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         movie_id = mobj.group('id')
-        api_response = self._download_webpage(
+        movie = self._download_json(
             self._MOVIE_TEMPLATE % movie_id, movie_id,
             'Downloading movie JSON')
-        movie = json.loads(api_response)
         movie_name = movie['name']
         return self._extract_videos(movie_id, movie_name)
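
All four call sites above swap the _download_webpage + json.loads pair for the _download_json helper. A rough sketch of the equivalence, with a stubbed download step (this is not youtube-dl's actual implementation, which also handles download errors and reporting):

import json

class JsonDownloadSketch(object):
    def _download_webpage(self, url_or_request, video_id, note):
        # youtube-dl performs the HTTP request here; stubbed for the sketch.
        return '{"name": "stub movie"}'

    def _download_json(self, url_or_request, video_id, note):
        # Fetch the page, then decode it, replacing the separate
        # json.loads() call sites removed above.
        api_response = self._download_webpage(url_or_request, video_id, note)
        return json.loads(api_response)

movie = JsonDownloadSketch()._download_json(
    'http://example.com/api/movie.json', 'movie_id', 'Downloading movie JSON')
print(movie['name'])  # stub movie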

youtube_dl/extractor/vk.py

@@ -16,7 +16,7 @@ from ..utils import (
 class VKIE(InfoExtractor):
     IE_NAME = 'vk.com'
-    _VALID_URL = r'https?://vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>\d+).*?\bid=(?P<id>\d+)|(?:videos.*?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
+    _VALID_URL = r'https?://vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:videos.*?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
     _NETRC_MACHINE = 'vk'
     _TESTS = [
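
The only change is the optional '-' in front of the owner id, which is negative for videos posted by VK communities. A quick check against a hypothetical embedded-player link (the ids are made up):

import re

_VALID_URL = r'https?://vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:videos.*?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'

url = 'http://vk.com/video_ext.php?oid=-10021&id=123456&hash=abcdef'
mobj = re.match(_VALID_URL, url)
print(mobj.group('oid'))  # -10021
print(mobj.group('id'))   # 123456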

youtube_dl/extractor/youtube.py

@@ -1738,11 +1738,10 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
         feed_entries = []
         paging = 0
         for i in itertools.count(1):
-            info = self._download_webpage(self._FEED_TEMPLATE % paging,
+            info = self._download_json(self._FEED_TEMPLATE % paging,
                                           u'%s feed' % self._FEED_NAME,
                                           u'Downloading page %s' % i)
-            info = json.loads(info)
-            feed_html = info['feed_html']
+            feed_html = info.get('feed_html') or info.get('content_html')
             m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
             ids = orderedSet(m.group(1) for m in m_ids)
             feed_entries.extend(
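
A minimal sketch of the fallback with a made-up feed payload: newer feed responses carry the rendered HTML under 'content_html' rather than 'feed_html', and the video ids are still pulled out of it the same way.

import re

info = {'content_html': ('<a href="/watch?v=abc123">first</a>'
                         '<a href="/watch?v=def456&amp;list=PL1">second</a>'
                         '<a href="/watch?v=abc123">again</a>')}
feed_html = info.get('feed_html') or info.get('content_html')

ids = []
for m in re.finditer(r'"/watch\?v=(.*?)["&]', feed_html):
    if m.group(1) not in ids:  # same effect as youtube-dl's orderedSet()
        ids.append(m.group(1))
print(ids)  # ['abc123', 'def456']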

youtube_dl/version.py

@@ -1,2 +1,2 @@
-__version__ = '2014.04.01.3'
+__version__ = '2014.04.02'