Compare commits
134 commits, 2015.01.09 ... 2015.01.23
Commit SHA1s: c994e6bd63, 3ee2aa7a16, 083c9df93b, 50789175ed, dc1b027cd4, f353cbdb2f, 73e449b226, b4a64c592b, 78111136db, 650ab5beeb, 7932de6352, 240b9b7a5c, bb6e38787d, 898c23c03f, b55ee18ff3, e5763a7a7e, 8bb1bdfae9, c62b449765, bb0aa4cb3c, d63528c8c7, c5db6bb32b, c8dc41a6e7, 47e0e1e0e2, efcddaebe9, 5fe5112589, 564bb5e964, 2df54b4ba8, 030aa5d9e7, c511f13f22, fdb2ed7455, ba319696a9, 910c552052, cce81f192c, 9d22a7dfb0, 4f4f642822, 2875cf01bb, e205db3bcd, 31d4a6e212, aaeb86f682, 9fa6ea2680, a9b6b5cd15, a45c0a5d67, c8dfe360eb, 4cfaf85c65, be5f2c192c, c9ef44ce29, e92d4a11f5, f2cbc96c3e, a69801e2c6, 034206cec1, 04e0bac233, fbef83f399, a5fb718c50, 227d4822ff, 5c4a81d934, 263255eb8d, 8e2ec95575, 8e7a9016d5, c85f368370, a0977064ce, 15aecd8711, 20dd0b2d20, f934860a07, 2aeb06d6dc, 6ccbb335d2, 4340decad2, f3ff1a3696, aa24de39aa, a798e64c15, 6a5fa75490, 8ad6b5ed9f, d5bb814d34, d156a1d981, 987493aef3, 8bfa75451b, c071733fd4, cd3063f3fa, 58b1f00d19, 149f05c7b6, 8a1b9b068e, c5a59d9391, 500b8b41c1, be4a824d74, ed3958d714, 6ce08764a1, c80ede5f13, bc694039e4, 3462af03e6, ea1d5bdcdd, 121c09c7be, 76bfaf6daf, d89c6e336a, 776dc3992a, 27ca82ebc6, 385f8ae468, b9f030cc26, 52afb2ac1b, 43bc88903d, 6ef9f88299, f71fdb0acc, c24dfef63c, 6271f1cad9, fb4b030aaf, ff21a8e0ee, 904fffffeb, 51897bb77c, bd1a281ede, 45598f1578, d02115f837, 34c781a24d, 1302394603, dd622d7c4e, d120e9013f, b8da6b9fc6, 4baea47c42, 176cf9e0c3, 7b6faddfc8, f90ad27375, 230b2287dd, ff0813313a, defaf19f5d, 754f0008ec, 2415951ead, 995ad69c54, 225e4b9633, 6ce2c6783b, 29f400b97d, 0cd64bd077, 0551a02b82, 25fadd06d0, 7a47d07c6d, 34e48bed3b, 7b61ac3ddf, c816336cbd
AUTHORS (3 changed lines)
```
@@ -101,3 +101,6 @@ Thijs Vermeir
Joel Leclerc
Christopher Krooss
Ondřej Caletka
Dinesh S
Johan K. Jensen
Yen Chi Hsuan
```
README.md (70 changed lines)
```
@@ -60,10 +60,6 @@ which means you can modify it, redistribute it or use it however you like.
they would handle
--extractor-descriptions Output descriptions of all supported
extractors
--proxy URL Use the specified HTTP/HTTPS proxy. Pass in
an empty string (--proxy "") for direct
connection
--socket-timeout None Time to wait before giving up, in seconds
--default-search PREFIX Use this prefix for unqualified URLs. For
example "gvsearch2:" downloads two videos
from google videos for youtube-dl "large
@@ -82,6 +78,18 @@ which means you can modify it, redistribute it or use it however you like.
--flat-playlist Do not extract the videos of a playlist,
only list them.

## Network Options:
--proxy URL Use the specified HTTP/HTTPS proxy. Pass in
an empty string (--proxy "") for direct
connection
--socket-timeout SECONDS Time to wait before giving up, in seconds
--source-address IP Client-side IP address to bind to
(experimental)
-4, --force-ipv4 Make all connections via IPv4
(experimental)
-6, --force-ipv6 Make all connections via IPv6
(experimental)

## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
--playlist-end NUMBER playlist video to end at (default is last)
@@ -231,6 +239,10 @@ which means you can modify it, redistribute it or use it however you like.
files in the current directory to debug
problems
--print-traffic Display sent and read HTTP traffic
-C, --call-home Contact the youtube-dl server for
debugging.
--no-call-home Do NOT contact the youtube-dl server for
debugging.

## Workarounds:
--encoding ENCODING Force the specified encoding (experimental)
@@ -255,10 +267,22 @@ which means you can modify it, redistribute it or use it however you like.
by extension for the extensions aac, m4a,
mp3, mp4, ogg, wav, webm. You can also use
the special names "best", "bestvideo",
"bestaudio", "worst". By default, youtube-
dl will pick the best quality. Use commas
to download multiple audio formats, such as
-f
"bestaudio", "worst". You can filter the
video results by putting a condition in
brackets, as in -f "best[height=720]" (or
-f "[filesize>10M]"). This works for
filesize, height, width, tbr, abr, and vbr
and the comparisons <, <=, >, >=, =, != .
Formats for which the value is not known
are excluded unless you put a question mark
(?) after the operator. You can combine
format filters, so -f "[height <=?
720][tbr>500]" selects up to 720p videos
(or videos where the height is not known)
with a bitrate of at least 500 KBit/s. By
default, youtube-dl will pick the best
quality. Use commas to download multiple
audio formats, such as -f
136/137/mp4/bestvideo,140/m4a/bestaudio.
You can merge the video and audio of two
formats into a single file using -f <video-
```
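The bracketed filter syntax documented above can also be driven from the Python API. The following is a minimal, hedged sketch (not part of this changeset): it assumes youtube_dl is installed, uses the repo's usual test video URL, and passes a filtered format string through the `format` option.

```python
import youtube_dl  # the package this compare view describes

ydl_opts = {
    # Pick the best format no larger than ~10 MB and at most 720p,
    # keeping formats whose height is unknown (the trailing "?").
    'format': 'best[filesize<10M][height<=?720]',
}

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```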
```
@@ -272,6 +296,10 @@ which means you can modify it, redistribute it or use it however you like.
-F, --list-formats list all available formats
--youtube-skip-dash-manifest Do not download the DASH manifest on
YouTube videos
--merge-output-format FORMAT If a merge is required (e.g.
bestvideo+bestaudio), output to given
container format. One of mkv, mp4, ogg,
webm, flv.Ignored if no merge is required

## Subtitle Options:
--write-sub write subtitle file
@@ -288,7 +316,8 @@ which means you can modify it, redistribute it or use it however you like.

## Authentication Options:
-u, --username USERNAME login with this account ID
-p, --password PASSWORD account password
-p, --password PASSWORD account password. If this option is left
out, youtube-dl will ask interactively.
-2, --twofactor TWOFACTOR two-factor auth code
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo, smotri)
@@ -318,6 +347,11 @@ which means you can modify it, redistribute it or use it however you like.
--add-metadata write metadata to the video file
--xattrs write metadata to the video file's xattrs
(using dublin core and xdg standards)
--fixup POLICY (experimental) Automatically correct known
faults of the file. One of never (do
nothing), warn (only emit a warning),
detect_or_warn(check whether we can do
anything about it, warn otherwise
--prefer-avconv Prefer avconv over ffmpeg for running the
postprocessors (default)
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
@@ -423,9 +457,15 @@ Apparently YouTube requires you to pass a CAPTCHA test if you download too much.

Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).

### The links provided by youtube-dl -g are not working anymore
### I extracted a video URL with -g, but it does not play on another machine / in my webbrowser.

The URLs youtube-dl outputs require the downloader to have the correct cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.
It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.

It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule.

Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using -g, your own downloader must support these as well.

If you want to play the video on a machine that is not running youtube-dl, you can relay the video content from the machine that runs youtube-dl. You can use `-o -` to let youtube-dl stream a video to stdout, or simply allow the player to download the files written by youtube-dl in turn.

### ERROR: no fmt_url_map or conn information found in video info

@@ -460,6 +500,10 @@ To make a different directory work - either for ffmpeg, or for youtube-dl, or fo

From then on, after restarting your shell, you will be able to access both youtube-dl and ffmpeg (and youtube-dl will be able to find ffmpeg) by simply typing `youtube-dl` or `ffmpeg`, no matter what directory you're in.

### How do I put downloads into a specific folder?

Use the `-o` to specify an [output template](#output-template), for example `-o "/home/user/videos/%(title)s-%(id)s.%(ext)s"`. If you want this for all of your downloads, put the option into your [configuration file](#configuration).

### How can I detect whether a given URL is supported by youtube-dl?

For one, have a look at the [list of supported sites](docs/supportedsites). Note that it can sometimes happen that the site changes its URL scheme (say, from http://example.com/v/1234567 to http://example.com/v/1234567 ) and youtube-dl reports an URL of a service in that list as unsupported. In that case, simply report a bug.
@@ -608,7 +652,9 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:

Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode.

Please include the full output of the command when run with `--verbose`. The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
**Please include the full output of youtube-dl when run with `-v`**.

The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.

Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
```
```
@@ -110,6 +110,20 @@ def expect_info_dict(self, got_dict, expected_dict):
        else:
            if isinstance(expected, compat_str) and expected.startswith('md5:'):
                got = 'md5:' + md5(got_dict.get(info_field))
            elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
                got = got_dict.get(info_field)
                self.assertTrue(
                    isinstance(got, list),
                    'Expected field %s to be a list, but it is of type %s' % (
                        info_field, type(got).__name__))
                expected_num = int(expected.partition(':')[2])
                assertGreaterEqual(
                    self, len(got), expected_num,
                    'Expected %d items in field %s, but only got %d' % (
                        expected_num, info_field, len(got)
                    )
                )
                continue
            else:
                got = got_dict.get(info_field)
            self.assertEqual(expected, got,
```
```
@@ -281,6 +281,61 @@ class TestFormatSelection(unittest.TestCase):
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], f1id)

    def test_format_filtering(self):
        formats = [
            {'format_id': 'A', 'filesize': 500, 'width': 1000},
            {'format_id': 'B', 'filesize': 1000, 'width': 500},
            {'format_id': 'C', 'filesize': 1000, 'width': 400},
            {'format_id': 'D', 'filesize': 2000, 'width': 600},
            {'format_id': 'E', 'filesize': 3000},
            {'format_id': 'F'},
            {'format_id': 'G', 'filesize': 1000000},
        ]
        for f in formats:
            f['url'] = 'http://_/'
            f['ext'] = 'unknown'
        info_dict = _make_result(formats)

        ydl = YDL({'format': 'best[filesize<3000]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'D')

        ydl = YDL({'format': 'best[filesize<=3000]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'E')

        ydl = YDL({'format': 'best[filesize <= ? 3000]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'F')

        ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'B')

        ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'C')

        ydl = YDL({'format': '[filesize>?1]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'G')

        ydl = YDL({'format': '[filesize<1M]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'E')

        ydl = YDL({'format': '[filesize<1MiB]'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], 'G')

    def test_add_extra_info(self):
        test_dict = {
            'extractor': 'Foo',
```
```
@@ -14,7 +14,6 @@ from test.helper import gettestcases
from youtube_dl.extractor import (
    FacebookIE,
    gen_extractors,
    TwitchIE,
    YoutubeIE,
)

@@ -72,18 +71,6 @@ class TestAllURLsMatching(unittest.TestCase):
        self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
        self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])

    def test_twitch_channelid_matching(self):
        self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv'))
        self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv'))
        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv'))
        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/'))

    def test_twitch_videoid_matching(self):
        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))

    def test_twitch_chapterid_matching(self):
        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))

    def test_youtube_extract(self):
        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
@@ -115,8 +102,6 @@ class TestAllURLsMatching(unittest.TestCase):
        self.assertMatch(':ythistory', ['youtube:history'])
        self.assertMatch(':thedailyshow', ['ComedyCentralShows'])
        self.assertMatch(':tds', ['ComedyCentralShows'])
        self.assertMatch(':colbertreport', ['ComedyCentralShows'])
        self.assertMatch(':cr', ['ComedyCentralShows'])

    def test_vimeo_matching(self):
        self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
```
```
@@ -79,6 +79,10 @@ class TestUtil(unittest.TestCase):
        tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
        self.assertEqual(sanitize_filename(tests), tests)

        self.assertEqual(
            sanitize_filename('New World record at 0:12:34'),
            'New World record at 0_12_34')

        forbidden = '"\0\\/'
        for fc in forbidden:
            for fbc in forbidden:
@@ -144,6 +148,7 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(unified_strdate('8/7/2009'), '20090708')
        self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
        self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
        self.assertEqual(unified_strdate('1968 12 10'), '19681210')
        self.assertEqual(unified_strdate('1968-12-10'), '19681210')
        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
        self.assertEqual(
@@ -208,6 +213,8 @@ class TestUtil(unittest.TestCase):

    def test_parse_duration(self):
        self.assertEqual(parse_duration(None), None)
        self.assertEqual(parse_duration(False), None)
        self.assertEqual(parse_duration('invalid'), None)
        self.assertEqual(parse_duration('1'), 1)
        self.assertEqual(parse_duration('1337:12'), 80232)
        self.assertEqual(parse_duration('9:12:43'), 33163)
```
```
@@ -10,6 +10,7 @@ import io
import itertools
import json
import locale
import operator
import os
import platform
import re
@@ -49,6 +50,7 @@ from .utils import (
    make_HTTPS_handler,
    MaxDownloadsReached,
    PagedList,
    parse_filesize,
    PostProcessingError,
    platform_name,
    preferredencoding,
@@ -58,6 +60,7 @@ from .utils import (
    takewhile_inclusive,
    UnavailableVideoError,
    url_basename,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLHandler,
@@ -70,6 +73,7 @@ from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    get_postprocessor,
@@ -203,6 +207,16 @@ class YoutubeDL(object):

    Progress hooks are guaranteed to be called at least once
    (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise
    source_address:    (Experimental) Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       youtube-dl servers for debugging.


    The following parameters are not used by YoutubeDL itself, they are used by
```
```
@@ -756,7 +770,59 @@ class YoutubeDL(object):
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _apply_format_filter(self, format_spec, available_formats):
        " Returns a tuple of the remaining format_spec and filtered formats "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*\[
            (?P<key>width|height|tbr|abr|vbr|filesize)
            \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
            \]$
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.search(format_spec)
        if not m:
            raise ValueError('Invalid format specification %r' % format_spec)

        try:
            comparison_value = int(m.group('value'))
        except ValueError:
            comparison_value = parse_filesize(m.group('value'))
            if comparison_value is None:
                comparison_value = parse_filesize(m.group('value') + 'B')
            if comparison_value is None:
                raise ValueError(
                    'Invalid value %r in format specification %r' % (
                        m.group('value'), format_spec))
        op = OPERATORS[m.group('op')]

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        new_formats = [f for f in available_formats if _filter(f)]

        new_format_spec = format_spec[:-len(m.group(0))]
        if not new_format_spec:
            new_format_spec = 'best'

        return (new_format_spec, new_formats)

    def select_format(self, format_spec, available_formats):
        while format_spec.endswith(']'):
            format_spec, available_formats = self._apply_format_filter(
                format_spec, available_formats)
        if not available_formats:
            return None

        if format_spec == 'best' or format_spec is None:
            return available_formats[-1]
        elif format_spec == 'worst':
```
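The bracket filters land in `_apply_format_filter` above: each trailing `[key<op>value]` is peeled off the format spec, the matching operator is applied, and the remaining spec defaults to `best`. The following is a hedged, standalone sketch of the same idea (simplified regex and helper names of my own, not youtube-dl's actual code):

```python
import operator
import re

OPERATORS = {'<': operator.lt, '<=': operator.le, '>': operator.gt,
             '>=': operator.ge, '=': operator.eq, '!=': operator.ne}
FILTER_RE = re.compile(
    r'\[(?P<key>width|height|tbr|abr|vbr|filesize)\s*'
    r'(?P<op><=|>=|!=|<|>|=)(?P<none_inclusive>\s*\?)?\s*(?P<value>\d+)\]')


def apply_filters(spec, formats):
    """Keep only formats that satisfy every [key<op>value] condition in spec."""
    for m in FILTER_RE.finditer(spec):
        op = OPERATORS[m.group('op')]
        value = int(m.group('value'))
        key = m.group('key')
        formats = [
            f for f in formats
            if (f.get(key) is None and m.group('none_inclusive'))
            or (f.get(key) is not None and op(f[key], value))
        ]
    return formats


formats = [
    {'format_id': 'A', 'filesize': 500, 'width': 1000},
    {'format_id': 'B', 'filesize': 1000, 'width': 500},
    {'format_id': 'D', 'filesize': 2000, 'width': 600},
]
# Keeps B and D: filesize >= 1000 and width no larger than 600.
print([f['format_id'] for f in apply_filters('best[filesize>=1000][width<=600]', formats)])
```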
```
@@ -909,10 +975,24 @@ class YoutubeDL(object):
                        'contain the video, try using '
                        '"-f %s+%s"' % (format_2, format_1))
                    return
                output_ext = (
                    formats_info[0]['ext']
                    if self.params.get('merge_output_format') is None
                    else self.params['merge_output_format'])
                selected_format = {
                    'requested_formats': formats_info,
                    'format': rf,
                    'ext': formats_info[0]['ext'],
                    'width': formats_info[0].get('width'),
                    'height': formats_info[0].get('height'),
                    'resolution': formats_info[0].get('resolution'),
                    'fps': formats_info[0].get('fps'),
                    'vcodec': formats_info[0].get('vcodec'),
                    'vbr': formats_info[0].get('vbr'),
                    'stretched_ratio': formats_info[0].get('stretched_ratio'),
                    'acodec': formats_info[1].get('acodec'),
                    'abr': formats_info[1].get('abr'),
                    'ext': output_ext,
                }
            else:
                selected_format = None
```
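The new `merge_output_format` parameter pairs with `bestvideo+bestaudio` requests: the merged file takes the requested container instead of the first format's extension. A hedged sketch of driving it from the Python API (option names come from the diff above; the URL is the repo's usual test video, and ffmpeg or avconv must be available for the merge):

```python
import youtube_dl

ydl_opts = {
    # Download the best video-only and best audio-only streams and merge
    # them into an mkv container, per the new merge_output_format option.
    'format': 'bestvideo+bestaudio',
    'merge_output_format': 'mkv',
}

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```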
```
@@ -1095,51 +1175,69 @@ class YoutubeDL(object):
                    (info_dict['thumbnail'], compat_str(err)))

        if not self.params.get('skip_download', False):
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
                success = True
            else:
                try:
                    def dl(name, info):
                        fd = get_suitable_downloader(info)(self, self.params)
                        for ph in self._progress_hooks:
                            fd.add_progress_hook(ph)
                        if self.params.get('verbose'):
                            self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                        return fd.download(name, info)
                    if info_dict.get('requested_formats') is not None:
                        downloaded = []
                        success = True
                        merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                        if not merger._executable:
                            postprocessors = []
                            self.report_warning('You have requested multiple '
                                'formats but ffmpeg or avconv are not installed.'
                                ' The formats won\'t be merged')
                        else:
                            postprocessors = [merger]
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            new_info.update(f)
                            fname = self.prepare_filename(new_info)
                            fname = prepend_extension(fname, 'f%s' % f['format_id'])
                            downloaded.append(fname)
                            partial_success = dl(fname, new_info)
                            success = success and partial_success
                        info_dict['__postprocessors'] = postprocessors
                        info_dict['__files_to_merge'] = downloaded
            try:
                def dl(name, info):
                    fd = get_suitable_downloader(info)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)
                if info_dict.get('requested_formats') is not None:
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                    if not merger._executable:
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                            'formats but ffmpeg or avconv are not installed.'
                            ' The formats won\'t be merged')
                    else:
                        # Just a single file
                        success = dl(filename, info_dict)
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_error('unable to download video data: %s' % str(err))
                    return
                except (OSError, IOError) as err:
                    raise UnavailableVideoError(err)
                except (ContentTooShortError, ) as err:
                    self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                    return
                        postprocessors = [merger]
                    for f in info_dict['requested_formats']:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

        if success:
            # Fixup content
            stretched_ratio = info_dict.get('stretched_ratio')
            if stretched_ratio is not None and stretched_ratio != 1:
                fixup_policy = self.params.get('fixup')
                if fixup_policy is None:
                    fixup_policy = 'detect_or_warn'
                if fixup_policy == 'warn':
                    self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                        info_dict['id'], stretched_ratio))
                elif fixup_policy == 'detect_or_warn':
                    stretched_pp = FFmpegFixupStretchedPP(self)
                    if stretched_pp.available:
                        info_dict.setdefault('__postprocessors', [])
                        info_dict['__postprocessors'].append(stretched_pp)
                    else:
                        self.report_warning(
                            '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
                                info_dict['id'], stretched_ratio))
                else:
                    assert fixup_policy == 'ignore'

            try:
                self.post_process(filename, info_dict)
            except (PostProcessingError) as err:
```
```
@@ -1188,14 +1286,15 @@ class YoutubeDL(object):
        """Run all the postprocessors on the given file."""
        info = dict(ie_info)
        info['filepath'] = filename
        keep_video = None
        pps_chain = []
        if ie_info.get('__postprocessors') is not None:
            pps_chain.extend(ie_info['__postprocessors'])
        pps_chain.extend(self._pps)
        for pp in pps_chain:
            keep_video = None
            old_filename = info['filepath']
            try:
                keep_video_wish, new_info = pp.run(info)
                keep_video_wish, info = pp.run(info)
                if keep_video_wish is not None:
                    if keep_video_wish:
                        keep_video = keep_video_wish
@@ -1204,12 +1303,12 @@ class YoutubeDL(object):
                        keep_video = keep_video_wish
            except PostProcessingError as e:
                self.report_error(e.msg)
        if keep_video is False and not self.params.get('keepvideo', False):
            try:
                self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
                os.remove(encodeFilename(filename))
            except (IOError, OSError):
                self.report_warning('Unable to remove downloaded video file')
            if keep_video is False and not self.params.get('keepvideo', False):
                try:
                    self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
                    os.remove(encodeFilename(old_filename))
                except (IOError, OSError):
                    self.report_warning('Unable to remove downloaded video file')

    def _make_archive_id(self, info_dict):
        # Future-proof against any change in case
```
```
@@ -1420,6 +1519,17 @@ class YoutubeDL(object):
            proxy_map.update(handler.proxies)
        self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

        if self.params.get('call_home', False):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
            self._write_string('[debug] Public IP address: %s\n' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode('utf-8')
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    def _setup_opener(self):
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
@@ -1450,9 +1560,8 @@ class YoutubeDL(object):
        proxy_handler = compat_urllib_request.ProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(
            self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(debuglevel=debuglevel)
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        opener = compat_urllib_request.build_opener(
            https_handler, proxy_handler, cookie_processor, ydlh)
        # Delete the default user-agent header, which would otherwise apply in
```
```
@@ -166,6 +166,7 @@ def _real_main(argv=None):
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
            parser.error('invalid video recode format specified')

    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
@@ -323,7 +324,11 @@ def _real_main(argv=None):
        'encoding': opts.encoding,
        'exec_cmd': opts.exec_cmd,
        'extract_flat': opts.extract_flat,
        'merge_output_format': opts.merge_output_format,
        'postprocessors': postprocessors,
        'fixup': opts.fixup,
        'source_address': opts.source_address,
        'call_home': opts.call_home,
    }

    with YoutubeDL(ydl_opts) as ydl:
```
```
@@ -4,6 +4,7 @@ import getpass
import optparse
import os
import re
import socket
import subprocess
import sys

@@ -307,6 +308,32 @@ else:
    compat_kwargs = lambda kwargs: kwargs


if sys.version_info < (2, 7):
    def compat_socket_create_connection(address, timeout, source_address=None):
        host, port = address
        err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            except socket.error as _:
                err = _
                if sock is not None:
                    sock.close()
        if err is not None:
            raise err
        else:
            raise socket.error("getaddrinfo returns an empty list")
else:
    compat_socket_create_connection = socket.create_connection


# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
@@ -342,6 +369,7 @@ __all__ = [
    'compat_ord',
    'compat_parse_qs',
    'compat_print',
    'compat_socket_create_connection',
    'compat_str',
    'compat_subprocess_get_DEVNULL',
    'compat_urllib_error',
```
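The compat shim above backports `source_address` support to Python versions older than 2.7; on newer interpreters it is simply `socket.create_connection`. For illustration, binding an outgoing connection to a specific local address with the standard library looks like this (host and local address are placeholders, not values from the changeset):

```python
import socket

# Bind the outgoing connection to a specific local IPv4 address; port 0 lets
# the OS pick an ephemeral port. This is the behaviour the new
# --source-address / -4 / -6 options rely on.
conn = socket.create_connection(
    ('example.com', 80),
    timeout=10,
    source_address=('192.0.2.10', 0),  # placeholder local address
)
conn.close()
```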
```
@@ -284,8 +284,19 @@ class FileDownloader(object):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        nooverwrites_and_exists = (
            self.params.get('nooverwrites', False)
            and os.path.exists(encodeFilename(filename))
        )

        continuedl_and_exists = (
            self.params.get('continuedl', False)
            and os.path.isfile(encodeFilename(filename))
            and not self.params.get('nopart', False)
        )

        # Check file already present
        if filename != '-' and self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
        if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
```
```
@@ -1,6 +1,7 @@
from __future__ import unicode_literals

from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import AdobeTVIE
@@ -26,7 +27,8 @@ from .arte import (
    ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .audiomack import AudiomackIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .auengine import AUEngineIE
from .azubu import AzubuIE
from .bambuser import BambuserIE, BambuserChannelIE
@@ -69,6 +71,7 @@ from .cnn import (
    CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE
@@ -91,6 +94,7 @@ from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
@@ -172,6 +176,7 @@ from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
@@ -206,6 +211,7 @@ from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
@@ -222,6 +228,7 @@ from .livestream import (
    LivestreamOriginalIE,
    LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
    LyndaIE,
@@ -274,6 +281,7 @@ from .nbc import (
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
@@ -290,6 +298,7 @@ from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import (
    NPOIE,
    NPOLiveIE,
    TegenlichtVproIE,
)
from .nrk import (
@@ -338,6 +347,7 @@ from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlXlIE
from .rtlnow import RTLnowIE
from .rtp import RTPIE
@@ -400,6 +410,7 @@ from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
@@ -446,10 +457,17 @@ from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .twentyfourvideo import TwentyFourVideoIE
from .twitch import TwitchIE
from .twitch import (
    TwitchVideoIE,
    TwitchChapterIE,
    TwitchVodIE,
    TwitchProfileIE,
    TwitchPastBroadcastsIE,
    TwitchStreamIE,
)
from .ubu import UbuIE
from .udemy import (
    UdemyIE,
```
youtube_dl/extractor/abc7news.py (new file, 68 lines)
```python
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import parse_iso8601


class Abc7NewsIE(InfoExtractor):
    _VALID_URL = r'https?://abc7news\.com(?:/[^/]+/(?P<display_id>[^/]+))?/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
            'info_dict': {
                'id': '472581',
                'display_id': 'east-bay-museum-celebrates-vintage-synthesizers',
                'ext': 'mp4',
                'title': 'East Bay museum celebrates history of synthesized music',
                'description': 'md5:a4f10fb2f2a02565c1749d4adbab4b10',
                'thumbnail': 're:^https?://.*\.jpg$',
                'timestamp': 1421123075,
                'upload_date': '20150113',
                'uploader': 'Jonathan Bloom',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        {
            'url': 'http://abc7news.com/472581',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id') or video_id

        webpage = self._download_webpage(url, display_id)

        m3u8 = self._html_search_meta(
            'contentURL', webpage, 'm3u8 url', fatal=True)

        formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
        self._sort_formats(formats)

        title = self._og_search_title(webpage).strip()
        description = self._og_search_description(webpage).strip()
        thumbnail = self._og_search_thumbnail(webpage)
        timestamp = parse_iso8601(self._search_regex(
            r'<div class="meta">\s*<time class="timeago" datetime="([^"]+)">',
            webpage, 'upload date', fatal=False))
        uploader = self._search_regex(
            r'rel="author">([^<]+)</a>',
            webpage, 'uploader', default=None)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader': uploader,
            'formats': formats,
        }
```
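A hedged sketch of exercising the new extractor through the public API (metadata only, no download; the URL is the test URL from the file above):

```python
import youtube_dl

with youtube_dl.YoutubeDL() as ydl:
    # download=False returns the extracted info_dict without fetching the video
    info = ydl.extract_info(
        'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
        download=False)
    print(info.get('id'), info.get('title'), info.get('uploader'))
```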
```
@@ -4,9 +4,12 @@ import time
import hmac

from .common import InfoExtractor
from ..utils import (
from ..compat import (
    compat_str,
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    int_or_none,
    float_or_none,
    xpath_text,
@@ -44,6 +47,33 @@ class AtresPlayerIE(InfoExtractor):
    _PLAYER_URL_TEMPLATE = 'https://servicios.atresplayer.com/episode/getplayer.json?episodePk=%s'
    _EPISODE_URL_TEMPLATE = 'http://www.atresplayer.com/episodexml/%s'

    _LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'j_username': username,
            'j_password': password,
        }

        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        error = self._html_search_regex(
            r'(?s)<ul class="list_error">(.+?)</ul>', response, 'error', default=None)
        if error:
            raise ExtractorError(
                'Unable to login: %s' % error, expected=True)

    def _real_extract(self, url):
        video_id = self._match_id(url)
```
youtube_dl/extractor/atttechchannel.py (new file, 55 lines)
```python
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import unified_strdate


class ATTTechChannelIE(InfoExtractor):
    _VALID_URL = r'https?://techchannel\.att\.com/play-video\.cfm/([^/]+/)*(?P<id>.+)'
    _TEST = {
        'url': 'http://techchannel.att.com/play-video.cfm/2014/1/27/ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use',
        'info_dict': {
            'id': '11316',
            'display_id': 'ATT-Archives-The-UNIX-System-Making-Computers-Easier-to-Use',
            'ext': 'flv',
            'title': 'AT&T Archives : The UNIX System: Making Computers Easier to Use',
            'description': 'A 1982 film about UNIX is the foundation for software in use around Bell Labs and AT&T.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'upload_date': '20140127',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        video_url = self._search_regex(
            r"url\s*:\s*'(rtmp://[^']+)'",
            webpage, 'video URL')

        video_id = self._search_regex(
            r'mediaid\s*=\s*(\d+)',
            webpage, 'video id', fatal=False)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        upload_date = unified_strdate(self._search_regex(
            r'[Rr]elease\s+date:\s*(\d{1,2}/\d{1,2}/\d{4})',
            webpage, 'upload date', fatal=False), False)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'ext': 'flv',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
        }
```
```
@@ -1,11 +1,15 @@
# coding: utf-8
from __future__ import unicode_literals

import itertools
import time

from .common import InfoExtractor
from .soundcloud import SoundcloudIE
from ..utils import ExtractorError

import time
from ..utils import (
    ExtractorError,
    url_basename,
)


class AudiomackIE(InfoExtractor):
@@ -17,12 +21,13 @@ class AudiomackIE(InfoExtractor):
        'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
        'info_dict':
        {
            'id': 'roosh-williams/extraordinary',
            'id': '310086',
            'ext': 'mp3',
            'title': 'Roosh Williams - Extraordinary'
            'uploader': 'Roosh Williams',
            'title': 'Extraordinary'
        }
    },
    # hosted on soundcloud via audiomack
    # audiomack wrapper around soundcloud song
    {
        'add_ie': ['Soundcloud'],
        'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
@@ -38,32 +43,97 @@ class AudiomackIE(InfoExtractor):
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # URLs end with [uploader name]/[uploader title]
        # this title is whatever the user types in, and is rarely
        # the proper song title.  Real metadata is in the api response
        album_url_tag = self._match_id(url)

        # Request the extended version of the api for extra fields like artist and title
        api_response = self._download_json(
            "http://www.audiomack.com/api/music/url/song/%s?_=%d" % (
                video_id, time.time()),
            video_id)
            'http://www.audiomack.com/api/music/url/song/%s?extended=1&_=%d' % (
                album_url_tag, time.time()),
            album_url_tag)

        if "url" not in api_response:
            raise ExtractorError("Unable to deduce api url of song")
        realurl = api_response["url"]
        # API is inconsistent with errors
        if 'url' not in api_response or not api_response['url'] or 'error' in api_response:
            raise ExtractorError('Invalid url %s', url)

        # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
        # - if so, pass the work off to the soundcloud extractor
        if SoundcloudIE.suitable(realurl):
            return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}

        webpage = self._download_webpage(url, video_id)
        artist = self._html_search_regex(
            r'<span class="artist">(.*?)</span>', webpage, "artist")
        songtitle = self._html_search_regex(
            r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>',
            webpage, "title")
        title = artist + " - " + songtitle
        # if so, pass the work off to the soundcloud extractor
        if SoundcloudIE.suitable(api_response['url']):
            return {'_type': 'url', 'url': api_response['url'], 'ie_key': 'Soundcloud'}

        return {
            'id': video_id,
            'title': title,
            'url': realurl,
            'id': api_response.get('id', album_url_tag),
            'uploader': api_response.get('artist'),
            'title': api_response.get('title'),
            'url': api_response['url'],
        }


class AudiomackAlbumIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/album/(?P<id>[\w/-]+)'
    IE_NAME = 'audiomack:album'
    _TESTS = [
        # Standard album playlist
        {
            'url': 'http://www.audiomack.com/album/flytunezcom/tha-tour-part-2-mixtape',
            'playlist_count': 15,
            'info_dict':
            {
                'id': '812251',
                'title': 'Tha Tour: Part 2 (Official Mixtape)'
            }
        },
        # Album playlist ripped from fakeshoredrive with no metadata
        {
            'url': 'http://www.audiomack.com/album/fakeshoredrive/ppp-pistol-p-project',
            'playlist': [{
                'info_dict': {
                    'title': '9.-heaven-or-hell-chimaca-ft-zuse-prod-by-dj-fu',
                    'id': '9.-heaven-or-hell-chimaca-ft-zuse-prod-by-dj-fu',
                    'ext': 'mp3',
                }
            }],
            'params': {
                'playliststart': 8,
                'playlistend': 8,
            }
        }
    ]

    def _real_extract(self, url):
        # URLs end with [uploader name]/[uploader title]
        # this title is whatever the user types in, and is rarely
        # the proper song title.  Real metadata is in the api response
        album_url_tag = self._match_id(url)
        result = {'_type': 'playlist', 'entries': []}
        # There is no one endpoint for album metadata - instead it is included/repeated in each song's metadata
        # Therefore we don't know how many songs the album has and must infi-loop until failure
        for track_no in itertools.count():
            # Get song's metadata
            api_response = self._download_json(
                'http://www.audiomack.com/api/music/url/album/%s/%d?extended=1&_=%d'
                % (album_url_tag, track_no, time.time()), album_url_tag,
                note='Querying song information (%d)' % (track_no + 1))

            # Total failure, only occurs when url is totally wrong
            # Won't happen in middle of valid playlist (next case)
            if 'url' not in api_response or 'error' in api_response:
                raise ExtractorError('Invalid url for track %d of album url %s' % (track_no, url))
            # URL is good but song id doesn't exist - usually means end of playlist
            elif not api_response['url']:
                break
            else:
                # Pull out the album metadata and add to result (if it exists)
                for resultkey, apikey in [('id', 'album_id'), ('title', 'album_title')]:
                    if apikey in api_response and resultkey not in result:
                        result[resultkey] = api_response[apikey]
                song_id = url_basename(api_response['url']).rpartition('.')[0]
                result['entries'].append({
                    'id': api_response.get('id', song_id),
                    'uploader': api_response.get('artist'),
                    'title': api_response.get('title', song_id),
                    'url': api_response['url'],
                })
        return result
```
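The album extractor above has no way to ask "how many tracks?", so it probes numbered per-track endpoints until the API stops returning a URL. A minimal hedged sketch of that pagination pattern, with a stand-in fetch function instead of Audiomack's real API:

```python
import itertools

# Stand-in for the per-track API call (self._download_json in the extractor);
# here it serves three fake tracks and then signals the end with an empty url.
_FAKE_ALBUM = [{'url': 'http://_/track%d.mp3' % i, 'title': 't%d' % i} for i in range(3)]


def fetch_track(track_no):
    if track_no < len(_FAKE_ALBUM):
        return _FAKE_ALBUM[track_no]
    return {'url': ''}


def collect_album():
    entries = []
    for track_no in itertools.count():
        response = fetch_track(track_no)
        if 'url' not in response:        # hard failure: the album URL was wrong
            raise ValueError('invalid album')
        if not response['url']:          # soft stop: past the last track
            break
        entries.append(response)
    return entries


print(len(collect_album()))  # -> 3
```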
```
@@ -161,7 +161,8 @@ class BandcampAlbumIE(InfoExtractor):
        entries = [
            self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
            for t_path in tracks_paths]
        title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title')
        title = self._search_regex(
            r'album_title\s*:\s*"(.*?)"', webpage, 'title', fatal=False)
        return {
            '_type': 'playlist',
            'id': playlist_id,
```
```
@@ -51,7 +51,7 @@ class CNNIE(InfoExtractor):
        mobj = re.match(self._VALID_URL, url)
        path = mobj.group('path')
        page_title = mobj.group('title')
        info_url = 'http://cnn.com/video/data/3.0/%s/index.xml' % path
        info_url = 'http://edition.cnn.com/video/data/3.0/%s/index.xml' % path
        info = self._download_xml(info_url, page_title)

        formats = []
@@ -143,13 +143,13 @@ class CNNArticleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:edition|www)\.)?cnn\.com/(?!video/)'
    _TEST = {
        'url': 'http://www.cnn.com/2014/12/21/politics/obama-north-koreas-hack-not-war-but-cyber-vandalism/',
        'md5': '275b326f85d80dff7592a9820f5dc887',
        'md5': '689034c2a3d9c6dc4aa72d65a81efd01',
        'info_dict': {
            'id': 'bestoftv/2014/12/21/sotu-crowley-president-obama-north-korea-not-going-to-be-intimidated.cnn',
            'id': 'bestoftv/2014/12/21/ip-north-korea-obama.cnn',
            'ext': 'mp4',
            'title': 'Obama: We\'re not going to be intimidated',
            'description': 'md5:e735586f3dc936075fa654a4d91b21f9',
            'upload_date': '20141220',
            'title': 'Obama: Cyberattack not an act of war',
            'description': 'md5:51ce6750450603795cad0cdfbd7d05c5',
            'upload_date': '20141221',
        },
        'add_ie': ['CNN'],
    }
```
youtube_dl/extractor/collegerama.py (new file, 92 lines)
```python
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
    float_or_none,
    int_or_none,
)


class CollegeRamaIE(InfoExtractor):
    _VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
    _TESTS = [
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
            'md5': '481fda1c11f67588c0d9d8fbdced4e39',
            'info_dict': {
                'id': '585a43626e544bdd97aeb71a0ec907a01d',
                'ext': 'mp4',
                'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
                'description': '',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 7713.088,
                'timestamp': 1413309600,
                'upload_date': '20141014',
            },
        },
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
            'md5': 'ef1fdded95bdf19b12c5999949419c92',
            'info_dict': {
                'id': '86a9ea9f53e149079fbdb4202b521ed21d',
                'ext': 'wmv',
                'title': '64ste Vakantiecursus: Afvalwater',
                'description': 'md5:7fd774865cc69d972f542b157c328305',
                'duration': 10853,
                'timestamp': 1326446400,
                'upload_date': '20120113',
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        player_options_request = {
            "getPlayerOptionsRequest": {
                "ResourceId": video_id,
                "QueryString": "",
            }
        }

        request = compat_urllib_request.Request(
            'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
            json.dumps(player_options_request))
        request.add_header('Content-Type', 'application/json')

        player_options = self._download_json(request, video_id)

        presentation = player_options['d']['Presentation']
        title = presentation['Title']
        description = presentation.get('Description')
        thumbnail = None
        duration = float_or_none(presentation.get('Duration'), 1000)
        timestamp = int_or_none(presentation.get('UnixTime'), 1000)

        formats = []
        for stream in presentation['Streams']:
            for video in stream['VideoUrls']:
                thumbnail_url = stream.get('ThumbnailUrl')
                if thumbnail_url:
                    thumbnail = 'http://collegerama.tudelft.nl' + thumbnail_url
                format_id = video['MediaType']
                if format_id == 'SS':
                    continue
                formats.append({
                    'url': video['Location'],
                    'format_id': format_id,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
```
```
@@ -34,12 +34,12 @@ class ComedyCentralIE(MTVServicesInfoExtractor):

class ComedyCentralShowsIE(MTVServicesInfoExtractor):
    IE_DESC = 'The Daily Show / The Colbert Report'
    # urls can be abbreviations like :thedailyshow or :colbert
    # urls can be abbreviations like :thedailyshow
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
    _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow)
        |https?://(:www\.)?
        (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
        ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
```
@@ -114,6 +114,9 @@ class InfoExtractor(object):
                                 to add to the request.
                    * http_post_data  Additional data to send with a POST
                                 request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
@@ -147,6 +150,17 @@ class InfoExtractor(object):
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The url to the video webpage, if given to youtube-dl it
                    should allow to get the same result again. (It will be set
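For illustration only: an extractor populating the new "comments" field would return a list of dicts using exactly the property names documented above. The values here are made up:

    comments = [{
        'author': 'Some Viewer',    # human-readable name
        'author_id': 'viewer123',
        'id': '4711',
        'text': 'Great episode!',   # or 'html' with markup instead
        'timestamp': 1421500000,    # UNIX timestamp of the comment
        'parent': 'root',           # top-level comment on the video itself
    }]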
@@ -365,9 +379,19 @@ class InfoExtractor(object):

        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5):
        """ Returns the data of the page as a string """
        res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal)
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            return res
        else:
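With the new signature a caller can opt in to retries on truncated reads; the TVP series extractor later in this compare passes tries=5. A hypothetical call with illustrative values:

    webpage = self._download_webpage(url, video_id, tries=5, timeout=3)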
@@ -718,8 +742,14 @@ class InfoExtractor(object):
            'Unable to download f4m manifest')

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        for i, media_el in enumerate(media_nodes):
            if manifest_version == '2.0':
                manifest_url = '/'.join(manifest_url.split('/')[:-1]) + '/' + media_el.attrib.get('href')
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            format_id = 'f4m-%d' % (i if tbr is None else tbr)
            formats.append({
131  youtube_dl/extractor/drbonanza.py  (new file)
@@ -0,0 +1,131 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_iso8601,
)


class DRBonanzaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?dr\.dk/bonanza/(?:[^/]+/)+(?:[^/])+?(?:assetId=(?P<id>\d+))?(?:[#&]|$)'

    _TESTS = [{
        'url': 'http://www.dr.dk/bonanza/serie/portraetter/Talkshowet.htm?assetId=65517',
        'md5': 'fe330252ddea607635cf2eb2c99a0af3',
        'info_dict': {
            'id': '65517',
            'ext': 'mp4',
            'title': 'Talkshowet - Leonard Cohen',
            'description': 'md5:8f34194fb30cd8c8a30ad8b27b70c0ca',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
            'timestamp': 1295537932,
            'upload_date': '20110120',
            'duration': 3664,
        },
    }, {
        'url': 'http://www.dr.dk/bonanza/radio/serie/sport/fodbold.htm?assetId=59410',
        'md5': '6dfe039417e76795fb783c52da3de11d',
        'info_dict': {
            'id': '59410',
            'ext': 'mp3',
            'title': 'EM fodbold 1992 Danmark - Tyskland finale Transmission',
            'description': 'md5:501e5a195749480552e214fbbed16c4e',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
            'timestamp': 1223274900,
            'upload_date': '20081006',
            'duration': 7369,
        },
    }]

    def _real_extract(self, url):
        url_id = self._match_id(url)
        webpage = self._download_webpage(url, url_id)

        if url_id:
            info = json.loads(self._html_search_regex(r'({.*?%s.*})' % url_id, webpage, 'json'))
        else:
            # Just fetch the first video on that page
            info = json.loads(self._html_search_regex(r'bonanzaFunctions.newPlaylist\(({.*})\)', webpage, 'json'))

        asset_id = str(info['AssetId'])
        title = info['Title'].rstrip(' \'\"-,.:;!?')
        duration = int_or_none(info.get('Duration'), scale=1000)
        # First published online. "FirstPublished" contains the date for original airing.
        timestamp = parse_iso8601(
            re.sub(r'\.\d+$', '', info['Created']))

        def parse_filename_info(url):
            match = re.search(r'/\d+_(?P<width>\d+)x(?P<height>\d+)x(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
            if match:
                return {
                    'width': int(match.group('width')),
                    'height': int(match.group('height')),
                    'vbr': int(match.group('bitrate')),
                    'ext': match.group('ext')
                }
            match = re.search(r'/\d+_(?P<bitrate>\d+)K\.(?P<ext>\w+)$', url)
            if match:
                return {
                    'vbr': int(match.group('bitrate')),
                    'ext': match.group(2)
                }
            return {}

        video_types = ['VideoHigh', 'VideoMid', 'VideoLow']
        preferencemap = {
            'VideoHigh': -1,
            'VideoMid': -2,
            'VideoLow': -3,
            'Audio': -4,
        }

        formats = []
        for file in info['Files']:
            if info['Type'] == "Video":
                if file['Type'] in video_types:
                    format = parse_filename_info(file['Location'])
                    format.update({
                        'url': file['Location'],
                        'format_id': file['Type'].replace('Video', ''),
                        'preference': preferencemap.get(file['Type'], -10),
                    })
                    formats.append(format)
                elif file['Type'] == "Thumb":
                    thumbnail = file['Location']
            elif info['Type'] == "Audio":
                if file['Type'] == "Audio":
                    format = parse_filename_info(file['Location'])
                    format.update({
                        'url': file['Location'],
                        'format_id': file['Type'],
                        'vcodec': 'none',
                    })
                    formats.append(format)
                elif file['Type'] == "Thumb":
                    thumbnail = file['Location']

        description = '%s\n%s\n%s\n' % (
            info['Description'], info['Actors'], info['Colophon'])

        for f in formats:
            f['url'] = f['url'].replace('rtmp://vod-bonanza.gss.dr.dk/bonanza/', 'http://vodfiles.dr.dk/')
            f['url'] = f['url'].replace('mp4:bonanza', 'bonanza')
        self._sort_formats(formats)

        display_id = re.sub(r'[^\w\d-]', '', re.sub(r' ', '-', title.lower())) + '-' + asset_id
        display_id = re.sub(r'-+', '-', display_id)

        return {
            'id': asset_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
        }
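To illustrate parse_filename_info above: the first regex pulls geometry and bitrate out of the media file name. A short self-contained check against a made-up path (the expected dict keys follow the extractor code):

    import re

    sample = '/bonanza/mp4/12345_640x360x1500K.mp4'  # hypothetical file path
    m = re.search(r'/\d+_(?P<width>\d+)x(?P<height>\d+)x(?P<bitrate>\d+)K\.(?P<ext>\w+)$', sample)
    print(m.group('width'), m.group('height'), m.group('bitrate'), m.group('ext'))
    # 640 360 1500 mp4 -> {'width': 640, 'height': 360, 'vbr': 1500, 'ext': 'mp4'}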
@@ -6,7 +6,7 @@ from ..utils import parse_iso8601


class DRTVIE(SubtitlesInfoExtractor):
    _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)'
    _VALID_URL = r'https?://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)'

    _TEST = {
        'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8',
@@ -9,6 +9,9 @@ from .common import InfoExtractor
from ..compat import (
    compat_str,
)
from ..utils import (
    ExtractorError,
)


class EightTracksIE(InfoExtractor):
@@ -112,14 +115,29 @@ class EightTracksIE(InfoExtractor):
        session = str(random.randint(0, 1000000000))
        mix_id = data['id']
        track_count = data['tracks_count']
        duration = data['duration']
        avg_song_duration = float(duration) / track_count
        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
        next_url = first_url
        entries = []

        for i in range(track_count):
            api_json = self._download_webpage(
                next_url, playlist_id,
                note='Downloading song information %d/%d' % (i + 1, track_count),
                errnote='Failed to download song information')
            api_json = None
            download_tries = 0

            while api_json is None:
                try:
                    api_json = self._download_webpage(
                        next_url, playlist_id,
                        note='Downloading song information %d/%d' % (i + 1, track_count),
                        errnote='Failed to download song information')
                except ExtractorError:
                    if download_tries > 3:
                        raise
                    else:
                        download_tries += 1
                        self._sleep(avg_song_duration, playlist_id)

            api_data = json.loads(api_json)
            track_data = api_data['set']['track']
            info = {
@@ -131,6 +149,7 @@ class EightTracksIE(InfoExtractor):
                'ext': 'm4a',
            }
            entries.append(info)

            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
                session, mix_id, track_data['id'])
        return {
@@ -1,8 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import unified_strdate

@@ -24,9 +22,7 @@ class ElPaisIE(InfoExtractor):
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        prefix = self._html_search_regex(
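The same simplification recurs across this batch of commits: manual re.match boilerplate is replaced by the _match_id helper. A tiny demo of the equivalence, with a made-up URL pattern (inside an extractor, _match_id performs exactly this lookup against _VALID_URL):

    import re

    _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>[0-9]+)'  # hypothetical pattern
    url = 'http://www.example.com/video/12345'
    video_id = re.match(_VALID_URL, url).group('id')  # what the removed lines did by hand
    print(video_id)  # 12345, the value self._match_id(url) now returns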
@@ -7,10 +7,9 @@ from ..compat import (
    compat_urllib_request,
)
from ..utils import (
    clean_html,
    parse_duration,
    parse_iso8601,
    str_to_int,
    unified_strdate,
)

@@ -28,68 +27,81 @@ class FourTubeIE(InfoExtractor):
|
||||
'uploader': 'WCP Club',
|
||||
'uploader_id': 'wcp-club',
|
||||
'upload_date': '20131031',
|
||||
'timestamp': 1383263892,
|
||||
'duration': 583,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'categories': list,
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage_url = 'http://www.4tube.com/videos/' + video_id
|
||||
webpage = self._download_webpage(webpage_url, video_id)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
self.report_extraction(video_id)
|
||||
title = self._html_search_meta('name', webpage)
|
||||
timestamp = parse_iso8601(self._html_search_meta(
|
||||
'uploadDate', webpage))
|
||||
thumbnail = self._html_search_meta('thumbnailUrl', webpage)
|
||||
uploader_id = self._html_search_regex(
|
||||
r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
|
||||
webpage, 'uploader id')
|
||||
uploader = self._html_search_regex(
|
||||
r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
|
||||
webpage, 'uploader')
|
||||
|
||||
playlist_json = self._html_search_regex(r'var playerConfigPlaylist\s+=\s+([^;]+)', webpage, 'Playlist')
|
||||
media_id = self._search_regex(r'idMedia:\s*(\d+)', playlist_json, 'Media Id')
|
||||
sources = self._search_regex(r'sources:\s*\[([^\]]*)\]', playlist_json, 'Sources').split(',')
|
||||
title = self._search_regex(r'title:\s*"([^"]*)', playlist_json, 'Title')
|
||||
thumbnail_url = self._search_regex(r'image:\s*"([^"]*)', playlist_json, 'Thumbnail', fatal=False)
|
||||
categories_html = self._search_regex(
|
||||
r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>',
|
||||
webpage, 'categories', fatal=False)
|
||||
categories = None
|
||||
if categories_html:
|
||||
categories = [
|
||||
c.strip() for c in re.findall(
|
||||
r'(?s)<li><a.*?>(.*?)</a>', categories_html)]
|
||||
|
||||
uploader_str = self._search_regex(r'<span>Uploaded by</span>(.*?)<span>', webpage, 'uploader', fatal=False)
|
||||
mobj = re.search(r'<a href="/sites/(?P<id>[^"]+)"><strong>(?P<name>[^<]+)</strong></a>', uploader_str)
|
||||
(uploader, uploader_id) = (mobj.group('name'), mobj.group('id')) if mobj else (clean_html(uploader_str), None)
|
||||
view_count = str_to_int(self._search_regex(
|
||||
r'<meta itemprop="interactionCount" content="UserPlays:([0-9,]+)">',
|
||||
webpage, 'view count', fatal=False))
|
||||
like_count = str_to_int(self._search_regex(
|
||||
r'<meta itemprop="interactionCount" content="UserLikes:([0-9,]+)">',
|
||||
webpage, 'like count', fatal=False))
|
||||
duration = parse_duration(self._html_search_meta('duration', webpage))
|
||||
|
||||
upload_date = None
|
||||
view_count = None
|
||||
duration = None
|
||||
description = self._html_search_meta('description', webpage, 'description')
|
||||
if description:
|
||||
upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
|
||||
fatal=False)
|
||||
if upload_date:
|
||||
upload_date = unified_strdate(upload_date)
|
||||
view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
|
||||
if view_count:
|
||||
view_count = str_to_int(view_count)
|
||||
duration = parse_duration(self._search_regex(r'Length: (\d+m\d+s)', description, 'duration', fatal=False))
|
||||
params_js = self._search_regex(
|
||||
r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
|
||||
webpage, 'initialization parameters'
|
||||
)
|
||||
params = self._parse_json('[%s]' % params_js, video_id)
|
||||
media_id = params[0]
|
||||
sources = ['%s' % p for p in params[2]]
|
||||
|
||||
token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
|
||||
token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
|
||||
media_id, '+'.join(sources))
|
||||
headers = {
|
||||
b'Content-Type': b'application/x-www-form-urlencoded',
|
||||
b'Origin': b'http://www.4tube.com',
|
||||
}
|
||||
token_req = compat_urllib_request.Request(token_url, b'{}', headers)
|
||||
tokens = self._download_json(token_req, video_id)
|
||||
|
||||
formats = [{
|
||||
'url': tokens[format]['token'],
|
||||
'format_id': format + 'p',
|
||||
'resolution': format + 'p',
|
||||
'quality': int(format),
|
||||
} for format in sources]
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnail': thumbnail_url,
|
||||
'categories': categories,
|
||||
'thumbnail': thumbnail,
|
||||
'uploader': uploader,
|
||||
'uploader_id': uploader_id,
|
||||
'upload_date': upload_date,
|
||||
'timestamp': timestamp,
|
||||
'like_count': like_count,
|
||||
'view_count': view_count,
|
||||
'duration': duration,
|
||||
'age_limit': 18,
|
||||
'webpage_url': webpage_url,
|
||||
}
|
||||
|
||||
117  youtube_dl/extractor/hearthisat.py  (new file)
@@ -0,0 +1,117 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urllib_request,
|
||||
compat_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
HEADRequest,
|
||||
str_to_int,
|
||||
urlencode_postdata,
|
||||
urlhandle_detect_ext,
|
||||
)
|
||||
|
||||
|
||||
class HearThisAtIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
|
||||
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
|
||||
_TEST = {
|
||||
'url': 'https://hearthis.at/moofi/dr-kreep',
|
||||
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
|
||||
'info_dict': {
|
||||
'id': '150939',
|
||||
'ext': 'wav',
|
||||
'title': 'Moofi - Dr. Kreep',
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'timestamp': 1421564134,
|
||||
'description': 'Creepy Patch. Mutable Instruments Braids Vowel + Formant Mode.',
|
||||
'upload_date': '20150118',
|
||||
'comment_count': int,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'duration': 71,
|
||||
'categories': ['Experimental'],
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
m = re.match(self._VALID_URL, url)
|
||||
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
track_id = self._search_regex(
|
||||
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
|
||||
|
||||
payload = urlencode_postdata({'tracks[]': track_id})
|
||||
req = compat_urllib_request.Request(self._PLAYLIST_URL, payload)
|
||||
req.add_header('Content-type', 'application/x-www-form-urlencoded')
|
||||
|
||||
track = self._download_json(req, track_id, 'Downloading playlist')[0]
|
||||
title = '{artist:s} - {title:s}'.format(**track)
|
||||
|
||||
categories = None
|
||||
if track.get('category'):
|
||||
categories = [track['category']]
|
||||
|
||||
description = self._og_search_description(webpage)
|
||||
thumbnail = self._og_search_thumbnail(webpage)
|
||||
|
||||
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
|
||||
view_count = str_to_int(self._search_regex(
|
||||
meta_span % 'plays_count', webpage, 'view count', fatal=False))
|
||||
like_count = str_to_int(self._search_regex(
|
||||
meta_span % 'likes_count', webpage, 'like count', fatal=False))
|
||||
comment_count = str_to_int(self._search_regex(
|
||||
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
|
||||
duration = str_to_int(self._search_regex(
|
||||
r'data-length="(\d+)', webpage, 'duration', fatal=False))
|
||||
timestamp = str_to_int(self._search_regex(
|
||||
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
|
||||
|
||||
formats = []
|
||||
mp3_url = self._search_regex(
|
||||
r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
|
||||
webpage, 'mp3 URL', fatal=False)
|
||||
if mp3_url:
|
||||
formats.append({
|
||||
'format_id': 'mp3',
|
||||
'vcodec': 'none',
|
||||
'acodec': 'mp3',
|
||||
'url': mp3_url,
|
||||
})
|
||||
download_path = self._search_regex(
|
||||
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
|
||||
webpage, 'download URL', default=None)
|
||||
if download_path:
|
||||
download_url = compat_urlparse.urljoin(url, download_path)
|
||||
ext_req = HEADRequest(download_url)
|
||||
ext_handle = self._request_webpage(
|
||||
ext_req, display_id, note='Determining extension')
|
||||
ext = urlhandle_detect_ext(ext_handle)
|
||||
formats.append({
|
||||
'format_id': 'download',
|
||||
'vcodec': 'none',
|
||||
'ext': ext,
|
||||
'url': download_url,
|
||||
'preference': 2, # Usually better quality
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': track_id,
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnail': thumbnail,
|
||||
'description': description,
|
||||
'duration': duration,
|
||||
'timestamp': timestamp,
|
||||
'view_count': view_count,
|
||||
'comment_count': comment_count,
|
||||
'like_count': like_count,
|
||||
'categories': categories,
|
||||
}
|
||||
40  youtube_dl/extractor/karaoketv.py  (new file)
@@ -0,0 +1,40 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
    js_to_json,
)


class KaraoketvIE(InfoExtractor):
    _VALID_URL = r'http://karaoketv\.co\.il/\?container=songs&id=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://karaoketv.co.il/?container=songs&id=171568',
        'info_dict': {
            'id': '171568',
            'ext': 'mp4',
            'title': 'אל העולם שלך - רותם כהן - שרים קריוקי',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        page_video_url = self._og_search_video_url(webpage, video_id)
        config_json = compat_urllib_parse.unquote_plus(self._search_regex(
            r'config=(.*)', page_video_url, 'configuration'))

        urls_info_json = self._download_json(
            config_json, video_id, 'Downloading configuration',
            transform_source=js_to_json)

        url = urls_info_json['playlist'][0]['url']

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': url,
        }
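The og:video URL on this site carries the playlist location percent-encoded behind a config= parameter; unquote_plus is what turns it back into a fetchable URL. A small self-contained illustration with a made-up value (compat_urllib_parse.unquote_plus wraps the stdlib function shown here):

    try:
        from urllib.parse import unquote_plus  # Python 3
    except ImportError:
        from urllib import unquote_plus  # Python 2

    encoded = 'config=http%3A%2F%2Fexample.com%2Fplaylist.json'  # hypothetical query string
    print(unquote_plus(encoded.split('config=', 1)[1]))
    # http://example.com/playlist.json, which the extractor then fetches with _download_json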
124  youtube_dl/extractor/lnkgo.py  (new file)
@@ -0,0 +1,124 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
js_to_json,
|
||||
unified_strdate,
|
||||
)
|
||||
|
||||
|
||||
class LnkGoIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?lnkgo\.alfa\.lt/visi\-video/(?P<show>[^/]+)/ziurek\-(?P<display_id>[A-Za-z0-9\-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://lnkgo.alfa.lt/visi-video/yra-kaip-yra/ziurek-yra-kaip-yra-162',
|
||||
'info_dict': {
|
||||
'id': '46712',
|
||||
'ext': 'mp4',
|
||||
'title': 'Yra kaip yra',
|
||||
'upload_date': '20150107',
|
||||
'description': 'md5:d82a5e36b775b7048617f263a0e3475e',
|
||||
'age_limit': 7,
|
||||
'duration': 3019,
|
||||
'thumbnail': 're:^https?://.*\.jpg$'
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True, # HLS download
|
||||
},
|
||||
}, {
|
||||
'url': 'http://lnkgo.alfa.lt/visi-video/aktualai-pratesimas/ziurek-nerdas-taiso-kompiuteri-2',
|
||||
'info_dict': {
|
||||
'id': '47289',
|
||||
'ext': 'mp4',
|
||||
'title': 'Nėrdas: Kompiuterio Valymas',
|
||||
'upload_date': '20150113',
|
||||
'description': 'md5:7352d113a242a808676ff17e69db6a69',
|
||||
'age_limit': 18,
|
||||
'duration': 346,
|
||||
'thumbnail': 're:^https?://.*\.jpg$'
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True, # HLS download
|
||||
},
|
||||
}]
|
||||
_AGE_LIMITS = {
|
||||
'N-7': 7,
|
||||
'N-14': 14,
|
||||
'S': 18,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
display_id = mobj.group('display_id')
|
||||
|
||||
webpage = self._download_webpage(
|
||||
url, display_id, 'Downloading player webpage')
|
||||
|
||||
video_id = self._search_regex(
|
||||
r'data-ep="([^"]+)"', webpage, 'video ID')
|
||||
title = self._og_search_title(webpage)
|
||||
description = self._og_search_description(webpage)
|
||||
|
||||
thumbnail_w = int_or_none(
|
||||
self._og_search_property('image:width', webpage, 'thumbnail width', fatal=False))
|
||||
thumbnail_h = int_or_none(
|
||||
self._og_search_property('image:height', webpage, 'thumbnail height', fatal=False))
|
||||
thumbnail = {
|
||||
'url': self._og_search_thumbnail(webpage),
|
||||
}
|
||||
if thumbnail_w and thumbnail_h:
|
||||
thumbnail.update({
|
||||
'width': thumbnail_w,
|
||||
'height': thumbnail_h,
|
||||
})
|
||||
|
||||
upload_date = unified_strdate(self._search_regex(
|
||||
r'class="meta-item\sair-time">.*?<strong>([^<]+)</strong>', webpage, 'upload date', fatal=False))
|
||||
duration = int_or_none(self._search_regex(
|
||||
r'VideoDuration = "([^"]+)"', webpage, 'duration', fatal=False))
|
||||
|
||||
pg_rating = self._search_regex(
|
||||
r'pgrating="([^"]+)"', webpage, 'PG rating', fatal=False, default='')
|
||||
age_limit = self._AGE_LIMITS.get(pg_rating.upper(), 0)
|
||||
|
||||
sources_js = self._search_regex(
|
||||
r'(?s)sources:\s(\[.*?\]),', webpage, 'sources')
|
||||
sources = self._parse_json(
|
||||
sources_js, video_id, transform_source=js_to_json)
|
||||
|
||||
formats = []
|
||||
for source in sources:
|
||||
if source.get('provider') == 'rtmp':
|
||||
m = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<play_path>.+)$', source['file'])
|
||||
if not m:
|
||||
continue
|
||||
formats.append({
|
||||
'format_id': 'rtmp',
|
||||
'ext': 'flv',
|
||||
'url': m.group('url'),
|
||||
'play_path': m.group('play_path'),
|
||||
'page_url': url,
|
||||
})
|
||||
elif source.get('file').endswith('.m3u8'):
|
||||
formats.append({
|
||||
'format_id': 'hls',
|
||||
'ext': source.get('type', 'mp4'),
|
||||
'url': source['file'],
|
||||
})
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnails': [thumbnail],
|
||||
'duration': duration,
|
||||
'description': description,
|
||||
'age_limit': age_limit,
|
||||
'upload_date': upload_date,
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import json
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_str,
|
||||
compat_HTTPError,
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
@@ -78,6 +79,16 @@ class NBCNewsIE(InfoExtractor):
|
||||
},
|
||||
'add_ie': ['ThePlatform'],
|
||||
},
|
||||
{
|
||||
'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156',
|
||||
'md5': 'fdbf39ab73a72df5896b6234ff98518a',
|
||||
'info_dict': {
|
||||
'id': 'Wjf9EDR3A_60',
|
||||
'ext': 'mp4',
|
||||
'title': 'FULL EPISODE: Family Business',
|
||||
'description': 'md5:757988edbaae9d7be1d585eb5d55cc04',
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@@ -115,10 +126,19 @@ class NBCNewsIE(InfoExtractor):
|
||||
if not base_url:
|
||||
continue
|
||||
playlist_url = base_url + '?form=MPXNBCNewsAPI'
|
||||
all_videos = self._download_json(playlist_url, title)['videos']
|
||||
|
||||
try:
|
||||
info = next(v for v in all_videos if v['mpxId'] == mpxid)
|
||||
all_videos = self._download_json(playlist_url, title)
|
||||
except ExtractorError as ee:
|
||||
if isinstance(ee.cause, compat_HTTPError):
|
||||
continue
|
||||
raise
|
||||
|
||||
if not all_videos or 'videos' not in all_videos:
|
||||
continue
|
||||
|
||||
try:
|
||||
info = next(v for v in all_videos['videos'] if v['mpxId'] == mpxid)
|
||||
break
|
||||
except StopIteration:
|
||||
continue
|
||||
|
||||
@@ -27,9 +27,7 @@ class NDTVIE(InfoExtractor):
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
filename = self._search_regex(
|
||||
|
||||
86  youtube_dl/extractor/netzkino.py  (new file)
@@ -0,0 +1,86 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
int_or_none,
|
||||
js_to_json,
|
||||
parse_iso8601,
|
||||
)
|
||||
|
||||
|
||||
class NetzkinoIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?netzkino\.de/\#!/(?P<category>[^/]+)/(?P<id>[^/]+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.netzkino.de/#!/scifikino/rakete-zum-mond',
|
||||
'md5': '92a3f8b76f8d7220acce5377ea5d4873',
|
||||
'info_dict': {
|
||||
'id': 'rakete-zum-mond',
|
||||
'ext': 'mp4',
|
||||
'title': 'Rakete zum Mond (Endstation Mond, Destination Moon)',
|
||||
'comments': 'mincount:3',
|
||||
'description': 'md5:1eddeacc7e62d5a25a2d1a7290c64a28',
|
||||
'upload_date': '20120813',
|
||||
'thumbnail': 're:https?://.*\.jpg$',
|
||||
'timestamp': 1344858571,
|
||||
'age_limit': 12,
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
category_id = mobj.group('category')
|
||||
video_id = mobj.group('id')
|
||||
|
||||
api_url = 'http://api.netzkino.de.simplecache.net/capi-2.0a/categories/%s.json?d=www' % category_id
|
||||
api_info = self._download_json(api_url, video_id)
|
||||
info = next(
|
||||
p for p in api_info['posts'] if p['slug'] == video_id)
|
||||
custom_fields = info['custom_fields']
|
||||
|
||||
production_js = self._download_webpage(
|
||||
'http://www.netzkino.de/beta/dist/production.min.js', video_id,
|
||||
note='Downloading player code')
|
||||
avo_js = self._search_regex(
|
||||
r'window\.avoCore\s*=.*?urlTemplate:\s*(\{.*?"\})',
|
||||
production_js, 'URL templates')
|
||||
templates = self._parse_json(
|
||||
avo_js, video_id, transform_source=js_to_json)
|
||||
|
||||
suffix = {
|
||||
'hds': '.mp4/manifest.f4m',
|
||||
'hls': '.mp4/master.m3u8',
|
||||
'pmd': '.mp4',
|
||||
}
|
||||
film_fn = custom_fields['Streaming'][0]
|
||||
formats = [{
|
||||
'format_id': key,
|
||||
'ext': 'mp4',
|
||||
'url': tpl.replace('{}', film_fn) + suffix[key],
|
||||
} for key, tpl in templates.items()]
|
||||
self._sort_formats(formats)
|
||||
|
||||
comments = [{
|
||||
'timestamp': parse_iso8601(c.get('date'), delimiter=' '),
|
||||
'id': c['id'],
|
||||
'author': c['name'],
|
||||
'html': c['content'],
|
||||
'parent': 'root' if c.get('parent', 0) == 0 else c['parent'],
|
||||
} for c in info.get('comments', [])]
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'comments': comments,
|
||||
'title': info['title'],
|
||||
'age_limit': int_or_none(custom_fields.get('FSK')[0]),
|
||||
'timestamp': parse_iso8601(info.get('date'), delimiter=' '),
|
||||
'description': clean_html(info.get('content')),
|
||||
'thumbnail': info.get('thumbnail'),
|
||||
'playlist_title': api_info.get('title'),
|
||||
'playlist_id': category_id,
|
||||
}
|
||||
@@ -1,19 +1,26 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
unified_strdate,
|
||||
fix_xml_ampersands,
|
||||
parse_duration,
|
||||
qualities,
|
||||
strip_jsonp,
|
||||
unified_strdate,
|
||||
url_basename,
|
||||
fix_xml_ampersands,
|
||||
)
|
||||
|
||||
|
||||
class NPOIE(InfoExtractor):
|
||||
class NPOBaseIE(InfoExtractor):
|
||||
def _get_token(self, video_id):
|
||||
token_page = self._download_webpage(
|
||||
'http://ida.omroep.nl/npoplayer/i.js',
|
||||
video_id, note='Downloading token')
|
||||
return self._search_regex(
|
||||
r'npoplayer\.token = "(.+?)"', token_page, 'token')
|
||||
|
||||
|
||||
class NPOIE(NPOBaseIE):
|
||||
IE_NAME = 'npo.nl'
|
||||
_VALID_URL = r'https?://www\.npo\.nl/[^/]+/[^/]+/(?P<id>[^/?]+)'
|
||||
|
||||
@@ -67,11 +74,20 @@ class NPOIE(InfoExtractor):
|
||||
'skip_download': True,
|
||||
}
|
||||
},
|
||||
# non asf in streams
|
||||
{
|
||||
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
|
||||
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
|
||||
'info_dict': {
|
||||
'id': 'WO_NOS_762771',
|
||||
'ext': 'mp4',
|
||||
'title': 'Hoe gaat Europa verder na Parijs?',
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
video_id = self._match_id(url)
|
||||
return self._get_info(video_id)
|
||||
|
||||
def _get_info(self, video_id):
|
||||
@@ -81,12 +97,8 @@ class NPOIE(InfoExtractor):
|
||||
# We have to remove the javascript callback
|
||||
transform_source=strip_jsonp,
|
||||
)
|
||||
token_page = self._download_webpage(
|
||||
'http://ida.omroep.nl/npoplayer/i.js',
|
||||
video_id,
|
||||
note='Downloading token'
|
||||
)
|
||||
token = self._search_regex(r'npoplayer\.token = "(.+?)"', token_page, 'token')
|
||||
|
||||
token = self._get_token(video_id)
|
||||
|
||||
formats = []
|
||||
|
||||
@@ -125,6 +137,12 @@ class NPOIE(InfoExtractor):
|
||||
stream_url = stream.get('url')
|
||||
if not stream_url:
|
||||
continue
|
||||
if '.asf' not in stream_url:
|
||||
formats.append({
|
||||
'url': stream_url,
|
||||
'quality': stream.get('kwaliteit'),
|
||||
})
|
||||
continue
|
||||
asx = self._download_xml(
|
||||
stream_url, video_id,
|
||||
'Downloading stream %d ASX playlist' % i,
|
||||
@@ -154,6 +172,83 @@ class NPOIE(InfoExtractor):
|
||||
}
|
||||
|
||||
|
||||
class NPOLiveIE(NPOBaseIE):
|
||||
IE_NAME = 'npo.nl:live'
|
||||
_VALID_URL = r'https?://www\.npo\.nl/live/(?P<id>.+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.npo.nl/live/npo-1',
|
||||
'info_dict': {
|
||||
'id': 'LI_NEDERLAND1_136692',
|
||||
'display_id': 'npo-1',
|
||||
'ext': 'mp4',
|
||||
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
|
||||
'description': 'Livestream',
|
||||
'is_live': True,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
live_id = self._search_regex(
|
||||
r'data-prid="([^"]+)"', webpage, 'live id')
|
||||
|
||||
metadata = self._download_json(
|
||||
'http://e.omroep.nl/metadata/%s' % live_id,
|
||||
display_id, transform_source=strip_jsonp)
|
||||
|
||||
token = self._get_token(display_id)
|
||||
|
||||
formats = []
|
||||
|
||||
streams = metadata.get('streams')
|
||||
if streams:
|
||||
for stream in streams:
|
||||
stream_type = stream.get('type').lower()
|
||||
if stream_type == 'ss':
|
||||
continue
|
||||
stream_info = self._download_json(
|
||||
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
|
||||
% (stream.get('url'), token),
|
||||
display_id, 'Downloading %s JSON' % stream_type)
|
||||
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
|
||||
continue
|
||||
stream_url = self._download_json(
|
||||
stream_info['stream'], display_id,
|
||||
'Downloading %s URL' % stream_type,
|
||||
transform_source=strip_jsonp)
|
||||
if stream_type == 'hds':
|
||||
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
|
||||
# f4m downloader downloads only piece of live stream
|
||||
for f4m_format in f4m_formats:
|
||||
f4m_format['preference'] = -1
|
||||
formats.extend(f4m_formats)
|
||||
elif stream_type == 'hls':
|
||||
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
|
||||
else:
|
||||
formats.append({
|
||||
'url': stream_url,
|
||||
})
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': live_id,
|
||||
'display_id': display_id,
|
||||
'title': self._live_title(metadata['titel']),
|
||||
'description': metadata['info'],
|
||||
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
|
||||
'formats': formats,
|
||||
'is_live': True,
|
||||
}
|
||||
|
||||
|
||||
class TegenlichtVproIE(NPOIE):
|
||||
IE_NAME = 'tegenlicht.vpro.nl'
|
||||
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
|
||||
|
||||
@@ -7,8 +7,10 @@ from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
float_or_none,
|
||||
parse_duration,
|
||||
unified_strdate,
|
||||
)
|
||||
from .subtitles import SubtitlesInfoExtractor
|
||||
|
||||
|
||||
class NRKIE(InfoExtractor):
|
||||
@@ -71,8 +73,8 @@ class NRKIE(InfoExtractor):
|
||||
}
|
||||
|
||||
|
||||
class NRKTVIE(InfoExtractor):
|
||||
_VALID_URL = r'http://tv\.nrk(?:super)?\.no/(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'
|
||||
class NRKTVIE(SubtitlesInfoExtractor):
|
||||
_VALID_URL = r'(?P<baseurl>http://tv\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
@@ -147,6 +149,29 @@ class NRKTVIE(InfoExtractor):
|
||||
}
|
||||
]
|
||||
|
||||
def _seconds2str(self, s):
|
||||
return '%02d:%02d:%02d.%03d' % (s / 3600, (s % 3600) / 60, s % 60, (s % 1) * 1000)
|
||||
|
||||
def _debug_print(self, txt):
|
||||
if self._downloader.params.get('verbose', False):
|
||||
self.to_screen('[debug] %s' % txt)
|
||||
|
||||
def _extract_captions(self, subtitlesurl, video_id, baseurl):
|
||||
url = "%s%s" % (baseurl, subtitlesurl)
|
||||
self._debug_print('%s: Subtitle url: %s' % (video_id, url))
|
||||
captions = self._download_xml(url, video_id, 'Downloading subtitles')
|
||||
lang = captions.get('lang', 'no')
|
||||
ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/ns/ttml}'))
|
||||
srt = ''
|
||||
for pos, p in enumerate(ps):
|
||||
begin = parse_duration(p.get('begin'))
|
||||
duration = parse_duration(p.get('dur'))
|
||||
starttime = self._seconds2str(begin)
|
||||
endtime = self._seconds2str(begin + duration)
|
||||
text = '\n'.join(p.itertext())
|
||||
srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), starttime, endtime, text)
|
||||
return {lang: srt}
|
||||
|
||||
def _extract_f4m(self, manifest_url, video_id):
|
||||
return self._extract_f4m_formats(manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id)
|
||||
|
||||
@@ -154,6 +179,7 @@ class NRKTVIE(InfoExtractor):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
part_id = mobj.group('part_id')
|
||||
baseurl = mobj.group('baseurl')
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
@@ -210,9 +236,18 @@ class NRKTVIE(InfoExtractor):
|
||||
m3u8_url = re.search(r'data-hls-media="([^"]+)"', webpage)
|
||||
if m3u8_url:
|
||||
formats.extend(self._extract_m3u8_formats(m3u8_url.group(1), video_id, 'mp4'))
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
subtitles_url = self._html_search_regex(
|
||||
r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"',
|
||||
webpage, 'subtitle URL', default=None)
|
||||
subtitles = None
|
||||
if subtitles_url:
|
||||
subtitles = self._extract_captions(subtitles_url, video_id, baseurl)
|
||||
if self._downloader.params.get('listsubtitles', False):
|
||||
self._list_available_subtitles(video_id, subtitles)
|
||||
return
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
@@ -221,4 +256,5 @@ class NRKTVIE(InfoExtractor):
|
||||
'upload_date': upload_date,
|
||||
'duration': duration,
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
||||
@@ -128,13 +128,16 @@ class ORFTVthekIE(InfoExtractor):
|
||||
}
|
||||
|
||||
|
||||
# Audios on ORF radio are only available for 7 days, so we can't add tests.
|
||||
|
||||
|
||||
class ORFOE1IE(InfoExtractor):
|
||||
IE_NAME = 'orf:oe1'
|
||||
IE_DESC = 'Radio Österreich 1'
|
||||
_VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)'
|
||||
_VALID_URL = r'http://oe1\.orf\.at/(?:programm/|konsole.*?#\?track_id=)(?P<id>[0-9]+)'
|
||||
|
||||
# Audios on ORF radio are only available for 7 days, so we can't add tests.
|
||||
_TEST = {
|
||||
'url': 'http://oe1.orf.at/konsole?show=on_demand#?track_id=394211',
|
||||
'only_matching': True,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
show_id = self._match_id(url)
|
||||
@@ -160,7 +163,7 @@ class ORFOE1IE(InfoExtractor):
|
||||
|
||||
|
||||
class ORFFM4IE(InfoExtractor):
|
||||
IE_DESC = 'orf:fm4'
|
||||
IE_NAME = 'orf:fm4'
|
||||
IE_DESC = 'radio FM4'
|
||||
_VALID_URL = r'http://fm4\.orf\.at/7tage/?#(?P<date>[0-9]+)/(?P<show>\w+)'
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ from ..compat import (
|
||||
compat_urllib_request,
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
str_to_int,
|
||||
)
|
||||
from ..aes import (
|
||||
@@ -44,6 +45,15 @@ class PornHubIE(InfoExtractor):
|
||||
req.add_header('Cookie', 'age_verified=1')
|
||||
webpage = self._download_webpage(req, video_id)
|
||||
|
||||
error_msg = self._html_search_regex(
|
||||
r'(?s)<div class="userMessageSection[^"]*".*?>(.*?)</div>',
|
||||
webpage, 'error message', default=None)
|
||||
if error_msg:
|
||||
error_msg = re.sub(r'\s+', ' ', error_msg)
|
||||
raise ExtractorError(
|
||||
'PornHub said: %s' % error_msg,
|
||||
expected=True, video_id=video_id)
|
||||
|
||||
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
|
||||
video_uploader = self._html_search_regex(
|
||||
r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|<span class="username)[^>]+>(.+?)<',
|
||||
|
||||
62  youtube_dl/extractor/rte.py  (new file)
@@ -0,0 +1,62 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
from ..utils import (
|
||||
float_or_none,
|
||||
)
|
||||
|
||||
|
||||
class RteIE(InfoExtractor):
|
||||
_VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/(?P<id>[0-9]+)/'
|
||||
_TEST = {
|
||||
'url': 'http://www.rte.ie/player/de/show/10363114/',
|
||||
'info_dict': {
|
||||
'id': '10363114',
|
||||
'ext': 'mp4',
|
||||
'title': 'One News',
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'description': 'The One O\'Clock News followed by Weather.',
|
||||
'duration': 436.844,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'f4m fails with --test atm'
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
title = self._og_search_title(webpage)
|
||||
description = self._html_search_meta('description', webpage, 'description')
|
||||
duration = float_or_none(self._html_search_meta(
|
||||
'duration', webpage, 'duration', fatal=False), 1000)
|
||||
|
||||
thumbnail_id = self._search_regex(
|
||||
r'<meta name="thumbnail" content="uri:irus:(.*?)" />', webpage, 'thumbnail')
|
||||
thumbnail = 'http://img.rasset.ie/' + thumbnail_id + '.jpg'
|
||||
|
||||
feeds_url = self._html_search_meta("feeds-prefix", webpage, 'feeds url') + video_id
|
||||
json_string = self._download_json(feeds_url, video_id)
|
||||
|
||||
# f4m_url = server + relative_url
|
||||
f4m_url = json_string['shows'][0]['media:group'][0]['rte:server'] + json_string['shows'][0]['media:group'][0]['url']
|
||||
f4m_formats = self._extract_f4m_formats(f4m_url, video_id)
|
||||
f4m_formats = [{
|
||||
'format_id': f['format_id'],
|
||||
'url': f['url'],
|
||||
'ext': 'mp4',
|
||||
'width': f['width'],
|
||||
'height': f['height'],
|
||||
} for f in f4m_formats]
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': f4m_formats,
|
||||
'description': description,
|
||||
'thumbnail': thumbnail,
|
||||
'duration': duration,
|
||||
}
|
||||
@@ -90,6 +90,20 @@ class SmotriIE(InfoExtractor):
|
||||
},
|
||||
'skip': 'Video is not approved by moderator',
|
||||
},
|
||||
# not approved by moderator, but available
|
||||
{
|
||||
'url': 'http://smotri.com/video/view/?id=v28888533b73',
|
||||
'md5': 'f44bc7adac90af518ef1ecf04893bb34',
|
||||
'info_dict': {
|
||||
'id': 'v28888533b73',
|
||||
'ext': 'mp4',
|
||||
'title': 'Russian Spies Killed By ISIL Child Soldier',
|
||||
'uploader': 'Mopeder',
|
||||
'uploader_id': 'mopeder',
|
||||
'duration': 71,
|
||||
'thumbnail': 'http://frame9.loadup.ru/d7/32/2888853.2.3.jpg',
|
||||
},
|
||||
},
|
||||
# swf player
|
||||
{
|
||||
'url': 'http://pics.smotri.com/scrubber_custom8.swf?file=v9188090500',
|
||||
@@ -146,13 +160,16 @@ class SmotriIE(InfoExtractor):
|
||||
|
||||
video = self._download_json(request, video_id, 'Downloading video JSON')
|
||||
|
||||
if video.get('_moderate_no') or not video.get('moderated'):
|
||||
raise ExtractorError('Video %s has not been approved by moderator' % video_id, expected=True)
|
||||
|
||||
if video.get('error'):
|
||||
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
|
||||
|
||||
video_url = video.get('_vidURL') or video.get('_vidURL_mp4')
|
||||
|
||||
if not video_url:
|
||||
if video.get('_moderate_no') or not video.get('moderated'):
|
||||
raise ExtractorError(
|
||||
'Video %s has not been approved by moderator' % video_id, expected=True)
|
||||
|
||||
if video.get('error'):
|
||||
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
|
||||
|
||||
title = video['title']
|
||||
thumbnail = video['_imgURL']
|
||||
upload_date = unified_strdate(video['added'])
|
||||
|
||||
@@ -4,7 +4,14 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urlparse
|
||||
from ..compat import (
|
||||
compat_urlparse,
|
||||
compat_HTTPError,
|
||||
)
|
||||
from ..utils import (
|
||||
HEADRequest,
|
||||
ExtractorError,
|
||||
)
|
||||
from .spiegeltv import SpiegeltvIE
|
||||
|
||||
|
||||
@@ -60,21 +67,31 @@ class SpiegelIE(InfoExtractor):
|
||||
xml_url = base_url + video_id + '.xml'
|
||||
idoc = self._download_xml(xml_url, video_id)
|
||||
|
||||
formats = [
|
||||
{
|
||||
'format_id': n.tag.rpartition('type')[2],
|
||||
'url': base_url + n.find('./filename').text,
|
||||
'width': int(n.find('./width').text),
|
||||
'height': int(n.find('./height').text),
|
||||
'abr': int(n.find('./audiobitrate').text),
|
||||
'vbr': int(n.find('./videobitrate').text),
|
||||
'vcodec': n.find('./codec').text,
|
||||
'acodec': 'MP4A',
|
||||
}
|
||||
for n in list(idoc)
|
||||
# Blacklist type 6, it's extremely LQ and not available on the same server
|
||||
if n.tag.startswith('type') and n.tag != 'type6'
|
||||
]
|
||||
formats = []
|
||||
for n in list(idoc):
|
||||
if n.tag.startswith('type') and n.tag != 'type6':
|
||||
format_id = n.tag.rpartition('type')[2]
|
||||
video_url = base_url + n.find('./filename').text
|
||||
# Test video URLs beforehand as some of them are invalid
|
||||
try:
|
||||
self._request_webpage(
|
||||
HEADRequest(video_url), video_id,
|
||||
'Checking %s video URL' % format_id)
|
||||
except ExtractorError as e:
|
||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
|
||||
self.report_warning(
|
||||
'%s video URL is invalid, skipping' % format_id, video_id)
|
||||
continue
|
||||
formats.append({
|
||||
'format_id': format_id,
|
||||
'url': video_url,
|
||||
'width': int(n.find('./width').text),
|
||||
'height': int(n.find('./height').text),
|
||||
'abr': int(n.find('./audiobitrate').text),
|
||||
'vbr': int(n.find('./videobitrate').text),
|
||||
'vcodec': n.find('./codec').text,
|
||||
'acodec': 'MP4A',
|
||||
})
|
||||
duration = float(idoc[0].findall('./duration')[0].text)
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
51  youtube_dl/extractor/streetvoice.py  (new file)
@@ -0,0 +1,51 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import unified_strdate
|
||||
|
||||
|
||||
class StreetVoiceIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://streetvoice.com/skippylu/songs/94440/',
|
||||
'md5': '15974627fc01a29e492c98593c2fd472',
|
||||
'info_dict': {
|
||||
'id': '94440',
|
||||
'ext': 'mp3',
|
||||
'filesize': 4167053,
|
||||
'title': '輸',
|
||||
'description': 'Crispy脆樂團 - 輸',
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'duration': 260,
|
||||
'upload_date': '20091018',
|
||||
'uploader': 'Crispy脆樂團',
|
||||
'uploader_id': '627810',
|
||||
}
|
||||
}, {
|
||||
'url': 'http://tw.streetvoice.com/skippylu/songs/94440/',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
song_id = self._match_id(url)
|
||||
|
||||
song = self._download_json(
|
||||
'http://streetvoice.com/music/api/song/%s' % song_id, song_id)
|
||||
|
||||
title = song['name']
|
||||
author = song['musician']['name']
|
||||
|
||||
return {
|
||||
'id': song_id,
|
||||
'url': song['file'],
|
||||
'filesize': song.get('size'),
|
||||
'title': title,
|
||||
'description': '%s - %s' % (author, title),
|
||||
'thumbnail': self._proto_relative_url(song.get('image'), 'http:'),
|
||||
'duration': song.get('length'),
|
||||
'upload_date': unified_strdate(song.get('created_at')),
|
||||
'uploader': author,
|
||||
'uploader_id': compat_str(song['musician']['id']),
|
||||
}
|
||||
@@ -9,17 +9,23 @@ from ..utils import ExtractorError
|
||||
class TinyPicIE(InfoExtractor):
|
||||
IE_NAME = 'tinypic'
|
||||
IE_DESC = 'tinypic.com videos'
|
||||
_VALID_URL = r'http://tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+'
|
||||
_VALID_URL = r'http://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8',
|
||||
'md5': '609b74432465364e72727ebc6203f044',
|
||||
'info_dict': {
|
||||
'id': '6xw7tc',
|
||||
'ext': 'flv',
|
||||
'title': 'shadow phenomenon weird',
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8',
|
||||
'md5': '609b74432465364e72727ebc6203f044',
|
||||
'info_dict': {
|
||||
'id': '6xw7tc',
|
||||
'ext': 'flv',
|
||||
'title': 'shadow phenomenon weird',
|
||||
},
|
||||
},
|
||||
{
|
||||
'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8',
|
||||
'only_matching': True,
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
|
||||
@@ -1,37 +1,139 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class TvpIE(InfoExtractor):
|
||||
IE_NAME = 'tvp.pl'
|
||||
_VALID_URL = r'https?://www\.tvp\.pl/.*?wideo/(?P<date>\d+)/(?P<id>\d+)'
|
||||
_VALID_URL = r'https?://(?:vod|www)\.tvp\.pl/.*/(?P<id>\d+)$'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238',
|
||||
'md5': '148408967a6a468953c0a75cbdaf0d7a',
|
||||
_TESTS = [{
|
||||
'url': 'http://vod.tvp.pl/filmy-fabularne/filmy-za-darmo/ogniem-i-mieczem/wideo/odc-2/4278035',
|
||||
'md5': 'cdd98303338b8a7f7abab5cd14092bf2',
|
||||
'info_dict': {
|
||||
'id': '12878238',
|
||||
'id': '4278035',
|
||||
'ext': 'wmv',
|
||||
'title': '31.10.2013 - Odcinek 2',
|
||||
'description': '31.10.2013 - Odcinek 2',
|
||||
'title': 'Ogniem i mieczem, odc. 2',
|
||||
},
|
||||
'skip': 'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'
|
||||
}
|
||||
}, {
|
||||
'url': 'http://vod.tvp.pl/seriale/obyczajowe/czas-honoru/sezon-1-1-13/i-seria-odc-13/194536',
|
||||
'md5': '8aa518c15e5cc32dfe8db400dc921fbb',
|
||||
'info_dict': {
|
||||
'id': '194536',
|
||||
'ext': 'mp4',
|
||||
'title': 'Czas honoru, I seria – odc. 13',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176',
|
||||
'md5': 'c3b15ed1af288131115ff17a17c19dda',
|
||||
'info_dict': {
|
||||
'id': '17916176',
|
||||
'ext': 'mp4',
|
||||
'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272',
|
||||
'md5': 'c3b15ed1af288131115ff17a17c19dda',
|
||||
'info_dict': {
|
||||
'id': '17834272',
|
||||
'ext': 'mp4',
|
||||
'title': 'Na sygnale, odc. 39',
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
json_url = 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id
|
||||
params = self._download_json(
|
||||
json_url, video_id, "Downloading video metadata")
|
||||
video_url = params['video_url']
|
||||
|
||||
webpage = self._download_webpage(
|
||||
'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id)
|
||||
|
||||
title = self._search_regex(
|
||||
r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1',
|
||||
webpage, 'title', group='title')
|
||||
series_title = self._search_regex(
|
||||
r'name\s*:\s*([\'"])SeriesTitle\1\s*,\s*value\s*:\s*\1(?P<series>.+?)\1',
|
||||
webpage, 'series', group='series', default=None)
|
||||
if series_title:
|
||||
title = '%s, %s' % (series_title, title)
|
||||
|
||||
thumbnail = self._search_regex(
|
||||
r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None)
|
||||
|
||||
video_url = self._search_regex(
|
||||
r'0:{src:([\'"])(?P<url>.*?)\1', webpage, 'formats', group='url', default=None)
|
||||
if not video_url:
|
||||
video_url = self._download_json(
|
||||
'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id,
|
||||
video_id)['video_url']
|
||||
|
||||
ext = video_url.rsplit('.', 1)[-1]
|
||||
if ext != 'ism/manifest':
|
||||
if '/' in ext:
|
||||
ext = 'mp4'
|
||||
formats = [{
|
||||
'format_id': 'direct',
|
||||
'url': video_url,
|
||||
'ext': ext,
|
||||
}]
|
||||
else:
|
||||
m3u8_url = re.sub('([^/]*)\.ism/manifest', r'\1.ism/\1.m3u8', video_url)
|
||||
formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': self._og_search_title(webpage),
|
||||
'ext': 'wmv',
|
||||
'url': video_url,
|
||||
'description': self._og_search_description(webpage),
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'title': title,
|
||||
'thumbnail': thumbnail,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
|
||||
class TvpSeriesIE(InfoExtractor):
|
||||
IE_NAME = 'tvp.pl:Series'
|
||||
_VALID_URL = r'https?://vod\.tvp\.pl/(?:[^/]+/){2}(?P<id>[^/]+)/?$'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://vod.tvp.pl/filmy-fabularne/filmy-za-darmo/ogniem-i-mieczem',
|
||||
'info_dict': {
|
||||
'title': 'Ogniem i mieczem',
|
||||
'id': '4278026',
|
||||
},
|
||||
'playlist_count': 4,
|
||||
}, {
|
||||
'url': 'http://vod.tvp.pl/audycje/podroze/boso-przez-swiat',
|
||||
'info_dict': {
|
||||
'title': 'Boso przez świat',
|
||||
'id': '9329207',
|
||||
},
|
||||
'playlist_count': 86,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id, tries=5)
|
||||
|
||||
title = self._html_search_regex(
|
||||
r'(?s) id=[\'"]path[\'"]>(?:.*? / ){2}(.*?)</span>', webpage, 'series')
|
||||
playlist_id = self._search_regex(r'nodeId:\s*(\d+)', webpage, 'playlist id')
|
||||
playlist = self._download_webpage(
|
||||
'http://vod.tvp.pl/vod/seriesAjax?type=series&nodeId=%s&recommend'
|
||||
'edId=0&sort=&page=0&pageSize=10000' % playlist_id, display_id, tries=5,
|
||||
note='Downloading playlist')
|
||||
|
||||
videos_paths = re.findall(
|
||||
'(?s)class="shortTitle">.*?href="(/[^"]+)', playlist)
|
||||
entries = [
|
||||
self.url_result('http://vod.tvp.pl%s' % v_path, ie=TvpIE.ie_key())
|
||||
for v_path in videos_paths]
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': playlist_id,
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'entries': entries,
|
||||
}
|
||||
|
||||
@@ -3,9 +3,11 @@ from __future__ import unicode_literals

import itertools
import re
import random

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse,
    compat_urllib_request,
)
@@ -15,44 +17,12 @@ from ..utils import (
)


class TwitchIE(InfoExtractor):
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?
    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?twitch\.tv/
        (?:
            (?P<channelid>[^/]+)|
            (?:(?:[^/]+)/v/(?P<vodid>[^/]+))|
            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
        )
        /?(?:\#.*)?$
        """
    _PAGE_LIMIT = 100
class TwitchBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'https://secure.twitch.tv/user/login'
    _TESTS = [{
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
    }, {
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/vanillatv',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }]

    def _handle_error(self, response):
        if not isinstance(response, dict):
@@ -64,71 +34,10 @@ class TwitchIE(InfoExtractor):
            expected=True)

    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        response = super(TwitchIE, self)._download_json(url, video_id, note)
        response = super(TwitchBaseIE, self)._download_json(url, video_id, note)
        self._handle_error(response)
        return response

    def _extract_media(self, item, item_id):
        ITEMS = {
            'a': 'video',
            'v': 'vod',
            'c': 'chapter',
        }
        info = self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % ITEMS[item]))

        if item == 'v':
            access_token = self._download_json(
                '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
                'Downloading %s access token' % ITEMS[item])
            formats = self._extract_m3u8_formats(
                'http://usher.twitch.tv/vod/%s?nauth=%s&nauthsig=%s'
                % (item_id, access_token['token'], access_token['sig']),
                item_id, 'mp4')
            info['formats'] = formats
            return info

        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s playlist JSON' % ITEMS[item])
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        return {
            'id': info['_id'],
            'title': info['title'],
            'description': info['description'],
            'duration': info['length'],
            'thumbnail': info['preview'],
            'uploader': info['channel']['display_name'],
            'uploader_id': info['channel']['name'],
            'timestamp': parse_iso8601(info['recorded_at']),
            'view_count': info['views'],
        }

    def _real_initialize(self):
        self._login()

@@ -167,81 +76,276 @@ class TwitchIE(InfoExtractor):
                raise ExtractorError(
                    'Unable to login: %s' % m.group('msg').strip(), expected=True)


class TwitchItemBaseIE(TwitchBaseIE):
    def _download_info(self, item, item_id):
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

    def _extract_media(self, item_id):
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        return {
            'id': info['_id'],
            'title': info['title'],
            'description': info['description'],
            'duration': info['length'],
            'thumbnail': info['preview'],
            'uploader': info['channel']['display_name'],
            'uploader_id': info['channel']['name'],
            'timestamp': parse_iso8601(info['recorded_at']),
            'view_count': info['views'],
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj.group('chapterid'):
            return self._extract_media('c', mobj.group('chapterid'))
        return self._extract_media(self._match_id(url))

        """
        webpage = self._download_webpage(url, chapter_id)
        m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)

class TwitchVideoIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>[^/]+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
    }


class TwitchChapterIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>[^/]+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]


class TwitchVodIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>[^/]+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TEST = {
        'url': 'http://www.twitch.tv/ksptv/v/3622000',
        'info_dict': {
            'id': 'v3622000',
            'ext': 'mp4',
            'title': '''KSPTV: Squadcast: "Everyone's on vacation so here's Dahud" Edition!''',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 6951,
            'timestamp': 1419028564,
            'upload_date': '20141219',
            'uploader': 'KSPTV',
            'uploader_id': 'ksptv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        item_id = self._match_id(url)
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?nauth=%s&nauthsig=%s'
            % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
            item_id, 'mp4')
        info['formats'] = formats
        return info


class TwitchPlaylistBaseIE(TwitchBaseIE):
    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
            videos = response['videos']
            if not videos:
                break
            entries.extend([self.url_result(video['url']) for video in videos])
            offset += limit
        return self.playlist_result(entries, channel_id, channel_name)

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))


class TwitchProfileIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }


class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }

class TwitchStreamIE(TwitchBaseIE):
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE

    _TEST = {
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')

        # Fallback on profile extraction if stream is offline
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)

        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')

        query = {
            'allow_source': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'],
            'token': access_token['token'],
        }

        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query).encode('utf-8')),
            channel_id, 'mp4')

        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))

        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')

        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                raise ExtractorError('Cannot find archive of a chapter')
                archive_id = m.group(1)
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
            doc = self._download_xml(
                api, chapter_id,
                note='Downloading chapter information',
                errnote='Chapter information download failed')
            for a in doc.findall('.//archive'):
                if archive_id == a.find('./id').text:
                    break
            else:
                raise ExtractorError('Could not find chapter in chapter information')

            video_url = a.find('./video_file_url').text
            video_ext = video_url.rpartition('.')[2] or 'flv'

            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
            chapter_info = self._download_json(
                chapter_api_url, 'c' + chapter_id,
                note='Downloading chapter metadata',
                errnote='Download of chapter metadata failed')

            bracket_start = int(doc.find('.//bracket_start').text)
            bracket_end = int(doc.find('.//bracket_end').text)

            # TODO determine start (and probably fix up file)
            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
            #video_url += '?start=' + TODO:start_timestamp
            # bracket_start is 13290, but we want 51670615
            self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
                                            'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))

            info = {
                'id': 'c' + chapter_id,
                'url': video_url,
                'ext': video_ext,
                'title': chapter_info['title'],
                'thumbnail': chapter_info['preview'],
                'description': chapter_info['description'],
                'uploader': chapter_info['channel']['display_name'],
                'uploader_id': chapter_info['channel']['name'],
            }
            return info
        """

        elif mobj.group('videoid'):
            return self._extract_media('a', mobj.group('videoid'))
        elif mobj.group('vodid'):
            return self._extract_media('v', mobj.group('vodid'))
        elif mobj.group('channelid'):
            channel_id = mobj.group('channelid')
            info = self._download_json(
                '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
                channel_id, 'Downloading channel info JSON')
            channel_name = info.get('display_name') or info.get('name')
            entries = []
            offset = 0
            limit = self._PAGE_LIMIT
            for counter in itertools.count(1):
                response = self._download_json(
                    '%s/kraken/channels/%s/videos/?offset=%d&limit=%d'
                    % (self._API_BASE, channel_id, offset, limit),
                    channel_id, 'Downloading channel videos JSON page %d' % counter)
                videos = response['videos']
                if not videos:
                    break
                entries.extend([self.url_result(video['url'], 'Twitch') for video in videos])
                offset += limit
            return self.playlist_result(entries, channel_id, channel_name)
        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }

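A quick way to see how the split-up Twitch extractors above route URLs is to evaluate their _VALID_URL patterns by hand. The sketch below is illustrative only: the regexes and test URLs are taken from the diff, the rest is scaffolding.

import re

_VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'
vod_re = r'%s/[^/]+/v/(?P<id>[^/]+)' % _VALID_URL_BASE         # TwitchVodIE
stream_re = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % _VALID_URL_BASE  # TwitchStreamIE

print(re.match(vod_re, 'http://www.twitch.tv/ksptv/v/3622000').group('id'))  # 3622000
print(re.match(stream_re, 'http://www.twitch.tv/shroomztv').group('id'))     # shroomztv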
@@ -8,6 +8,7 @@ from ..compat import (
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    clean_html,
    get_element_by_id,
)
@@ -17,13 +18,13 @@ class VeeHDIE(InfoExtractor):
    _VALID_URL = r'https?://veehd\.com/video/(?P<id>\d+)'

    _TEST = {
        'url': 'http://veehd.com/video/4686958',
        'url': 'http://veehd.com/video/4639434_Solar-Sinter',
        'info_dict': {
            'id': '4686958',
            'id': '4639434',
            'ext': 'mp4',
            'title': 'Time Lapse View from Space ( ISS)',
            'uploader_id': 'spotted',
            'description': 'md5:f0094c4cf3a72e22bc4e4239ef767ad7',
            'title': 'Solar Sinter',
            'uploader_id': 'VideoEyes',
            'description': 'md5:46a840e8692ddbaffb5f81d9885cb457',
        },
    }

@@ -34,6 +35,10 @@ class VeeHDIE(InfoExtractor):
        # See https://github.com/rg3/youtube-dl/issues/2102
        self._download_webpage(url, video_id, 'Requesting webpage')
        webpage = self._download_webpage(url, video_id)

        if 'This video has been removed<' in webpage:
            raise ExtractorError('Video %s has been removed' % video_id, expected=True)

        player_path = self._search_regex(
            r'\$\("#playeriframe"\).attr\({src : "(.+?)"',
            webpage, 'player path')
@@ -42,18 +47,35 @@ class VeeHDIE(InfoExtractor):
        self._download_webpage(player_url, video_id, 'Requesting player page')
        player_page = self._download_webpage(
            player_url, video_id, 'Downloading player page')
        config_json = self._search_regex(
            r'value=\'config=({.+?})\'', player_page, 'config json')
        config = json.loads(config_json)

        video_url = compat_urlparse.unquote(config['clip']['url'])
        config_json = self._search_regex(
            r'value=\'config=({.+?})\'', player_page, 'config json', default=None)

        if config_json:
            config = json.loads(config_json)
            video_url = compat_urlparse.unquote(config['clip']['url'])
        else:
            iframe_src = self._search_regex(
                r'<iframe[^>]+src="/?([^"]+)"', player_page, 'iframe url')
            iframe_url = 'http://veehd.com/%s' % iframe_src

            self._download_webpage(iframe_url, video_id, 'Requesting iframe page')
            iframe_page = self._download_webpage(
                iframe_url, video_id, 'Downloading iframe page')

            video_url = self._search_regex(
                r"file\s*:\s*'([^']+)'", iframe_page, 'video url')

        title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
        uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>',
                                              webpage, 'uploader')
        thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"',
                                       webpage, 'thumbnail')
        description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul',
                                              webpage, 'description', flags=re.DOTALL)
        uploader_id = self._html_search_regex(
            r'<a href="/profile/\d+">(.+?)</a>',
            webpage, 'uploader')
        thumbnail = self._search_regex(
            r'<img id="veehdpreview" src="(.+?)"',
            webpage, 'thumbnail')
        description = self._html_search_regex(
            r'<td class="infodropdown".*?<div>(.*?)<ul',
            webpage, 'description', flags=re.DOTALL)

        return {
            '_type': 'video',

@@ -1,11 +1,15 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    remove_start,
)

@@ -16,34 +20,40 @@ class VideoMegaIE(InfoExtractor):
        (?:iframe\.php)?\?ref=(?P<id>[A-Za-z0-9]+)
        '''
    _TEST = {
        'url': 'http://videomega.tv/?ref=GKeGPVedBe',
        'md5': '240fb5bcf9199961f48eb17839b084d6',
        'url': 'http://videomega.tv/?ref=QR0HCUHI1661IHUCH0RQ',
        'md5': 'bf5c2f95c4c917536e80936af7bc51e1',
        'info_dict': {
            'id': 'GKeGPVedBe',
            'id': 'QR0HCUHI1661IHUCH0RQ',
            'ext': 'mp4',
            'title': 'XXL - All Sports United',
            'title': 'Big Buck Bunny',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
        webpage = self._download_webpage(url, video_id)

        escaped_data = self._search_regex(
            r'unescape\("([^"]+)"\)', webpage, 'escaped data')
        iframe_url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
        req = compat_urllib_request.Request(iframe_url)
        req.add_header('Referer', url)
        webpage = self._download_webpage(req, video_id)

        try:
            escaped_data = re.findall(r'unescape\("([^"]+)"\)', webpage)[-1]
        except IndexError:
            raise ExtractorError('Unable to extract escaped data')

        playlist = compat_urllib_parse.unquote(escaped_data)

        thumbnail = self._search_regex(
            r'image:\s*"([^"]+)"', playlist, 'thumbnail', fatal=False)
        url = self._search_regex(r'file:\s*"([^"]+)"', playlist, 'URL')
        video_url = self._search_regex(r'file:\s*"([^"]+)"', playlist, 'URL')
        title = remove_start(self._html_search_regex(
            r'<title>(.*?)</title>', webpage, 'title'), 'VideoMega.tv - ')

        formats = [{
            'format_id': 'sd',
            'url': url,
            'url': video_url,
        }]
        self._sort_formats(formats)

@@ -52,4 +62,5 @@ class VideoMegaIE(InfoExtractor):
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'http_referer': iframe_url,
        }

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
@@ -67,6 +68,10 @@ class WDRIE(InfoExtractor):
            'upload_date': '20140717',
        },
    },
    {
        'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html',
        'playlist_mincount': 146,
    }
]

    def _real_extract(self, url):
@@ -81,6 +86,27 @@ class WDRIE(InfoExtractor):
            self.url_result(page_url + href, 'WDR')
            for href in re.findall(r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, webpage)
        ]

        if entries: # Playlist page
            return self.playlist_result(entries, page_id)

        # Overview page
        entries = []
        for page_num in itertools.count(2):
            hrefs = re.findall(
                r'<li class="mediathekvideo"\s*>\s*<img[^>]*>\s*<a href="(/mediathek/video/[^"]+)"',
                webpage)
            entries.extend(
                self.url_result(page_url + href, 'WDR')
                for href in hrefs)
            next_url_m = re.search(
                r'<li class="nextToLast">\s*<a href="([^"]+)"', webpage)
            if not next_url_m:
                break
            next_url = page_url + next_url_m.group(1)
            webpage = self._download_webpage(
                next_url, page_id,
                note='Downloading playlist page %d' % page_num)
        return self.playlist_result(entries, page_id)

        flashvars = compat_parse_qs(
@@ -172,8 +198,7 @@ class WDRMausIE(InfoExtractor):
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        param_code = self._html_search_regex(
@@ -224,5 +249,3 @@ class WDRMausIE(InfoExtractor):
            'thumbnail': thumbnail,
            'upload_date': upload_date,
        }

# TODO test _1

@@ -30,7 +30,7 @@ class XboxClipsIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)

        video_url = self._html_search_regex(
            r'>(?:Link|Download): <a href="([^"]+)">', webpage, 'video URL')
            r'>(?:Link|Download): <a[^>]+href="([^"]+)"', webpage, 'video URL')
        title = self._html_search_regex(
            r'<title>XboxClips \| ([^<]+)</title>', webpage, 'title')
        upload_date = unified_strdate(self._html_search_regex(

@@ -264,9 +264,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
@@ -394,6 +394,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                'format': '141',
            },
        },
        # JS player signature function name containing $
        {
            'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
            'info_dict': {
                'id': 'nfWlot6h_JM',
                'ext': 'm4a',
                'title': 'Taylor Swift - Shake It Off',
                'description': 'md5:2acfda1b285bdd478ccec22f9918199d',
                'uploader': 'TaylorSwiftVEVO',
                'uploader_id': 'TaylorSwiftVEVO',
                'upload_date': '20140818',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # Controversy video
        {
            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
@@ -465,6 +482,20 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                'skip_download': 'requires avconv',
            }
        },
        # Non-square pixels
        {
            'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
            'info_dict': {
                'id': '_b-2C3KPAM0',
                'ext': 'mp4',
                'stretched_ratio': 16 / 9.,
                'upload_date': '20110310',
                'uploader_id': 'AllenMeow',
                'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
                'uploader': '孫艾倫',
                'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
            },
        }
    ]

    def __init__(self, *args, **kwargs):
@@ -574,7 +605,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):

    def _parse_sig_js(self, jscode):
        funcname = self._search_regex(
            r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode,
            r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
            'Initial JS player signature function name')

        jsi = JSInterpreter(jscode)
@@ -1051,6 +1082,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                    f['preference'] = f.get('preference', 0) - 10000
                formats.extend(dash_formats)

        # Check for malformed aspect ratio
        stretched_m = re.search(
            r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
            video_webpage)
        if stretched_m:
            ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
            for f in formats:
                if f.get('vcodec') != 'none':
                    f['stretched_ratio'] = ratio

        self._sort_formats(formats)

        return {

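The aspect-ratio check added above only needs the og:video:tag meta element from the watch page. A minimal standalone sketch (the sample markup is invented) of what it extracts:

import re

sample = '<meta property="og:video:tag" content="yt:stretch=16:9">'
m = re.search(
    r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
    sample)
if m:
    ratio = float(m.group('w')) / float(m.group('h'))  # 1.777..., stored as 'stretched_ratio'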
@@ -148,14 +148,6 @@ def parseOpts(overrideArguments=None):
        '--extractor-descriptions',
        action='store_true', dest='list_extractor_descriptions', default=False,
        help='Output descriptions of all supported extractors')
    general.add_option(
        '--proxy', dest='proxy',
        default=None, metavar='URL',
        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
    general.add_option(
        '--socket-timeout',
        dest='socket_timeout', type=float, default=None,
        help='Time to wait before giving up, in seconds')
    general.add_option(
        '--default-search',
        dest='default_search', metavar='PREFIX',
@@ -173,6 +165,31 @@ def parseOpts(overrideArguments=None):
        default=False,
        help='Do not extract the videos of a playlist, only list them.')

    network = optparse.OptionGroup(parser, 'Network Options')
    network.add_option(
        '--proxy', dest='proxy',
        default=None, metavar='URL',
        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
    network.add_option(
        '--socket-timeout',
        dest='socket_timeout', type=float, default=None, metavar='SECONDS',
        help='Time to wait before giving up, in seconds')
    network.add_option(
        '--source-address',
        metavar='IP', dest='source_address', default=None,
        help='Client-side IP address to bind to (experimental)',
    )
    network.add_option(
        '-4', '--force-ipv4',
        action='store_const', const='0.0.0.0', dest='source_address',
        help='Make all connections via IPv4 (experimental)',
    )
    network.add_option(
        '-6', '--force-ipv6',
        action='store_const', const='::', dest='source_address',
        help='Make all connections via IPv6 (experimental)',
    )

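These new switches all funnel into the 'source_address' and 'socket_timeout' params that the networking code further down reads. A hedged sketch of the same thing through the embedding API (option names from this diff; the YoutubeDL usage pattern itself is the usual one and not part of this change):

import youtube_dl

ydl_opts = {
    'source_address': '0.0.0.0',  # what -4/--force-ipv4 stores
    'socket_timeout': 10,
    'proxy': '',                  # empty string = direct connection
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])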
    selection = optparse.OptionGroup(parser, 'Video Selection')
    selection.add_option(
        '--playlist-start',
@@ -247,7 +264,7 @@ def parseOpts(overrideArguments=None):
    authentication.add_option(
        '-p', '--password',
        dest='password', metavar='PASSWORD',
        help='account password')
        help='account password. If this option is left out, youtube-dl will ask interactively.')
    authentication.add_option(
        '-2', '--twofactor',
        dest='twofactor', metavar='TWOFACTOR',
@@ -272,6 +289,17 @@ def parseOpts(overrideArguments=None):
        'extensions aac, m4a, mp3, mp4, ogg, wav, webm. '
        'You can also use the special names "best",'
        ' "bestvideo", "bestaudio", "worst". '
        ' You can filter the video results by putting a condition in'
        ' brackets, as in -f "best[height=720]"'
        ' (or -f "[filesize>10M]"). '
        ' This works for filesize, height, width, tbr, abr, and vbr'
        ' and the comparisons <, <=, >, >=, =, != .'
        ' Formats for which the value is not known are excluded unless you'
        ' put a question mark (?) after the operator.'
        ' You can combine format filters, so '
        '-f "[height <=? 720][tbr>500]" '
        'selects up to 720p videos (or videos where the height is not '
        'known) with a bitrate of at least 500 KBit/s.'
        ' By default, youtube-dl will pick the best quality.'
        ' Use commas to download multiple audio formats, such as'
        ' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
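The help text above describes the new bracketed format filters. For reference, the same selector can be handed over programmatically as well (a sketch only, mirroring the -f examples from the help string; the combination with "best" is an assumption on my part):

ydl_opts = {
    # up to 720p (or unknown height) with a bitrate of at least 500 KBit/s
    'format': 'best[height<=?720][tbr>500]',
}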
@@ -302,6 +330,12 @@ def parseOpts(overrideArguments=None):
        '--youtube-skip-dash-manifest',
        action='store_false', dest='youtube_include_dash_manifest',
        help='Do not download the DASH manifest on YouTube videos')
    video_format.add_option(
        '--merge-output-format',
        action='store', dest='merge_output_format', metavar='FORMAT', default=None,
        help=(
            'If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv.'
            'Ignored if no merge is required'))

    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
    subtitles.add_option(
@@ -482,6 +516,14 @@ def parseOpts(overrideArguments=None):
        '--print-traffic',
        dest='debug_printtraffic', action='store_true', default=False,
        help='Display sent and read HTTP traffic')
    verbosity.add_option(
        '-C', '--call-home',
        dest='call_home', action='store_true', default=False,
        help='Contact the youtube-dl server for debugging.')
    verbosity.add_option(
        '--no-call-home',
        dest='call_home', action='store_false', default=False,
        help='Do NOT contact the youtube-dl server for debugging.')

    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
    filesystem.add_option(
@@ -625,6 +667,13 @@ def parseOpts(overrideArguments=None):
        '--xattrs',
        action='store_true', dest='xattrs', default=False,
        help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
    postproc.add_option(
        '--fixup',
        metavar='POLICY', dest='fixup', default='detect_or_warn',
        help='(experimental) Automatically correct known faults of the file. '
             'One of never (do nothing), warn (only emit a warning), '
             'detect_or_warn(check whether we can do anything about it, warn '
             'otherwise')
    postproc.add_option(
        '--prefer-avconv',
        action='store_false', dest='prefer_ffmpeg',
@@ -639,6 +688,7 @@ def parseOpts(overrideArguments=None):
        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')

    parser.add_option_group(general)
    parser.add_option_group(network)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)

@@ -6,6 +6,7 @@ from .ffmpeg import (
    FFmpegAudioFixPP,
    FFmpegEmbedSubtitlePP,
    FFmpegExtractAudioPP,
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegMetadataPP,
    FFmpegVideoConvertorPP,
@@ -24,6 +25,7 @@ __all__ = [
    'FFmpegAudioFixPP',
    'FFmpegEmbedSubtitlePP',
    'FFmpegExtractAudioPP',
    'FFmpegFixupStretchedPP',
    'FFmpegMergerPP',
    'FFmpegMetadataPP',
    'FFmpegPostProcessor',

@@ -50,6 +50,10 @@ class FFmpegPostProcessor(PostProcessor):
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((p, get_exe_version(p, args=['-version'])) for p in programs)

    @property
    def available(self):
        return self._executable is not None

    @property
    def _executable(self):
        if self._downloader.params.get('prefer_ffmpeg', False):
@@ -78,12 +82,15 @@ class FFmpegPostProcessor(PostProcessor):
    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
        self.check_version()

        oldest_mtime = min(
            os.stat(encodeFilename(path)).st_mtime for path in input_paths)

        files_cmd = []
        for path in input_paths:
            files_cmd.extend([encodeArgument('-i'), encodeFilename(path, True)])
        cmd = ([encodeFilename(self._executable, True), encodeArgument('-y')] +
               files_cmd
               + [encodeArgument(o) for o in opts] +
               files_cmd +
               [encodeArgument(o) for o in opts] +
               [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])

        if self._downloader.params.get('verbose', False):
@@ -94,6 +101,7 @@ class FFmpegPostProcessor(PostProcessor):
            stderr = stderr.decode('utf-8', 'replace')
            msg = stderr.strip().split('\n')[-1]
            raise FFmpegPostProcessorError(msg)
        os.utime(encodeFilename(out_path), (oldest_mtime, oldest_mtime))
        if self._deletetempfiles:
            for ipath in input_paths:
                os.remove(ipath)
@@ -467,15 +475,21 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
        filename = information['filepath']
        input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs]

        opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
        opts = [
            '-map', '0',
            '-c', 'copy',
            # Don't copy the existing subtitles, we may be running the
            # postprocessor a second time
            '-map', '-0:s',
            '-c:s', 'mov_text',
        ]
        for (i, lang) in enumerate(sub_langs):
            opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
            opts.extend(['-map', '%d:0' % (i + 1)])
            lang_code = self._conver_lang_code(lang)
            if lang_code is not None:
                opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
        opts.extend(['-f', 'mp4'])

        temp_filename = filename + '.temp'
        temp_filename = prepend_extension(filename, 'temp')
        self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
        os.remove(encodeFilename(filename))
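For a single external subtitle track, the rewritten option list above works out to roughly the following (a sketch; the example index and the 'eng' code produced by _conver_lang_code are illustrative assumptions):

opts = [
    '-map', '0', '-c', 'copy',
    '-map', '-0:s',               # drop any subtitles already in the file
    '-c:s', 'mov_text',
    '-map', '1:0',                # first external subtitle input
    '-metadata:s:s:0', 'language=eng',
    '-f', 'mp4',
]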
@@ -540,3 +554,22 @@ class FFmpegAudioFixPP(FFmpegPostProcessor):
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return True, info


class FFmpegFixupStretchedPP(FFmpegPostProcessor):
    def run(self, info):
        stretched_ratio = info.get('stretched_ratio')
        if stretched_ratio is None or stretched_ratio == 1:
            return

        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')

        options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
        self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
        self.run_ffmpeg(filename, temp_filename, options)

        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return True, info

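Combined with run_ffmpeg_multiple_files above, FFmpegFixupStretchedPP ends up invoking roughly the following command line (a sketch with example file names; the real call goes through encodeFilename/encodeArgument):

stretched_ratio = 16 / 9.
cmd = [
    'ffmpeg', '-y',
    '-i', 'video.mp4',                  # downloaded file
    '-c', 'copy',                       # no re-encode, container-level fix only
    '-aspect', '%f' % stretched_ratio,  # override the display aspect ratio
    'video.temp.mp4',                   # prepend_extension(filename, 'temp')
]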
@@ -59,7 +59,7 @@ def update_self(to_screen, verbose):
        to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
        return

    https_handler = make_HTTPS_handler(False)
    https_handler = make_HTTPS_handler({})
    opener = compat_urllib_request.build_opener(https_handler)

    # Check if there is a new version

@@ -10,6 +10,7 @@ import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import itertools
import io
@@ -34,7 +35,9 @@ from .compat import (
    compat_chr,
    compat_getenv,
    compat_html_entities,
    compat_http_client,
    compat_parse_qs,
    compat_socket_create_connection,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
@@ -205,6 +208,10 @@ def get_element_by_attribute(attribute, value, html):

def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None: # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
@@ -280,6 +287,8 @@ def sanitize_filename(s, restricted=False, is_id=False):
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
@@ -387,13 +396,15 @@ def formatSeconds(secs):
        return '%d' % secs


def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
@@ -416,17 +427,14 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
                    except ssl.SSLError:
                        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23)

        class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler):
            def https_open(self, req):
                return self.do_open(HTTPSConnectionV3, req)
        return HTTPSHandlerV3(**kwargs)
        return YoutubeDLHTTPSHandler(params, https_conn_class=HTTPSConnectionV3, **kwargs)
    else: # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)

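Callers now pass the whole params dict instead of a bare boolean. A minimal sketch of the new call shape, mirroring the update_self change above (the illustrative params dict is an assumption):

params = {'nocheckcertificate': False, 'source_address': None}
https_handler = make_HTTPS_handler(params)
opener = compat_urllib_request.build_opener(https_handler)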
class ExtractorError(Exception):
@@ -540,6 +548,26 @@ class ContentTooShortError(Exception):
        self.expected = expected


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'): # Python 2.7+
            hc.source_address = sa
        else: # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

@@ -558,6 +586,15 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        return self.do_open(functools.partial(
            _create_http_connection, self, compat_http_client.HTTPConnection, False),
            req)

    @staticmethod
    def deflate(data):
        try:
@@ -627,6 +664,18 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    https_response = http_response


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        return self.do_open(functools.partial(
            _create_http_connection, self, self._https_conn_class, True),
            req)


def parse_iso8601(date_str, delimiter='T'):
    """ Return a UNIX timestamp from the given date """

@@ -673,11 +722,9 @@ def unified_strdate(date_str, day_first=True):
        '%b %dst %Y %I:%M%p',
        '%b %dnd %Y %I:%M%p',
        '%b %dth %Y %I:%M%p',
        '%Y %m %d',
        '%Y-%m-%d',
        '%Y/%m/%d',
        '%d.%m.%Y',
        '%d/%m/%Y',
        '%d/%m/%y',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
@@ -692,10 +739,16 @@ def unified_strdate(date_str, day_first=True):
    ]
    if day_first:
        format_expressions.extend([
            '%d.%m.%Y',
            '%d/%m/%Y',
            '%d/%m/%y',
            '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m.%d.%Y',
            '%m/%d/%Y',
            '%m/%d/%y',
            '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
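The effect of the new day_first switch on an ambiguous numeric date, for illustration (unified_strdate returns YYYYMMDD strings; the sample date is mine):

unified_strdate('02/03/2015')                   # '20150302' - day first (default)
unified_strdate('02/03/2015', day_first=False)  # '20150203' - month first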
@@ -1218,13 +1271,13 @@ def float_or_none(v, scale=1, invscale=1, default=None):


def parse_duration(s):
    if s is None:
    if not isinstance(s, basestring if sys.version_info < (3, 0) else compat_str):
        return None

    s = s.strip()

    m = re.match(
        r'''(?ix)T?
        r'''(?ix)(?:P?T)?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|
@@ -1559,6 +1612,14 @@ def urlhandle_detect_ext(url_handle):
    except AttributeError: # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return getheader('Content-Type').split("/")[1]

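A standalone sketch of the new Content-Disposition branch in urlhandle_detect_ext (the header value is an example):

import re

cd = 'attachment; filename="clip.mp4"'
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
    print(m.group('filename'))  # clip.mp4 -> determine_ext() would yield 'mp4'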
@@ -1,3 +1,3 @@
from __future__ import unicode_literals

__version__ = '2015.01.09'
__version__ = '2015.01.23'