Compare commits: 2013.12.16 ... 2014.02.19
689 commits
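Since each release is tagged with its version string (see the `git tag` check in the release script further below), the same commit range can be reproduced locally with `git log --oneline 2013.12.16..2014.02.19`.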
| Author | SHA1 | Date |
|---|---|---|
| | c377110539 | |
| | a9c7198a0b | |
| | f6f01ea17b | |
| | f2d0fc6823 | |
| | f7000f3a1b | |
| | c7f0177fa7 | |
| | 09c4d50944 | |
| | 2eb5d315d4 | |
| | ad5976b4d9 | |
| | a0dfcdce5e | |
| | 96d1637082 | |
| | 960f317171 | |
| | 4412ca751d | |
| | cbffec0c95 | |
| | 0cea52cc18 | |
| | 6d784e87f4 | |
| | ae6cae78f1 | |
| | 0f99566c01 | |
| | 2db806b4aa | |
| | 3f32c0ba4c | |
| | 541cb26c0d | |
| | 5544e038ab | |
| | 9032dc28a6 | |
| | 03635e2a71 | |
| | 00cf938aa5 | |
| | a5f707c495 | |
| | 1824b48169 | |
| | 07ad22b8af | |
| | b53466e168 | |
| | 6a7a389679 | |
| | 4edff78531 | |
| | 99043c2ea5 | |
| | e68abba910 | |
| | 3165dc4d9f | |
| | 66c43a53e4 | |
| | 463b334616 | |
| | b71dbc57c4 | |
| | 72ca1d7f45 | |
| | 76e461f395 | |
| | 1074982e6e | |
| | 29b2aaf035 | |
| | 6f90d098c5 | |
| | 0715161450 | |
| | 896583517f | |
| | 713d31fac8 | |
| | 96cb10a5f5 | |
| | c207c1044e | |
| | 79629ec717 | |
| | 008fda0f08 | |
| | 0ae6b01937 | |
| | def630e523 | |
| | c5ba203e23 | |
| | 2317e6b2b3 | |
| | cb38928974 | |
| | fa78f13302 | |
| | 18395217c4 | |
| | 34bd987811 | |
| | af6ba6a1c4 | |
| | 85409a0c69 | |
| | ebfe352b62 | |
| | fde56d2f17 | |
| | 3501423dfe | |
| | 0de668af51 | |
| | 2a584ea90a | |
| | 0f6ed94a15 | |
| | bcb891e82b | |
| | ac6e4ca1ed | |
| | 2e20bba708 | |
| | e70dc1d14b | |
| | 0793a7b3c7 | |
| | 026fcc0495 | |
| | 81c2f20b53 | |
| | 1afe753462 | |
| | 524c2c716a | |
| | b542d4bbd7 | |
| | cf1eb45153 | |
| | a97bcd80ba | |
| | 17968e444c | |
| | 2e3fd9ec2f | |
| | d6a283b025 | |
| | 9766538124 | |
| | 98dbee8681 | |
| | e421491b3b | |
| | 6828d37c41 | |
| | bf5f610099 | |
| | 8b7f73404a | |
| | 85cacb2f51 | |
| | b3fa3917e2 | |
| | 082c6c867a | |
| | 03fcf1ab57 | |
| | 3b00dea5eb | |
| | 8bc6c8e3c0 | |
| | 79bc27b53a | |
| | 84dd703199 | |
| | c6fdba23a6 | |
| | b19fe521a9 | |
| | c1e672d121 | |
| | f4371f4784 | |
| | d914d9d187 | |
| | 845d14d377 | |
| | 4a9540b6d2 | |
| | 9f31be7000 | |
| | 41fa1b627d | |
| | c0c4e66b29 | |
| | cd8662de22 | |
| | 3587159614 | |
| | d67cc9fa7c | |
| | bf3a2fe923 | |
| | e9ea0bf123 | |
| | 63424b6233 | |
| | 0bf35c5cf5 | |
| | 95c29381eb | |
| | 94c4abce7f | |
| | f2dffe55f8 | |
| | 46a073bfac | |
| | df872ec4e7 | |
| | 5de90176d9 | |
| | dcf3eec47a | |
| | e9e4f30d26 | |
| | 83cebd73d4 | |
| | 1df4229bd7 | |
| | 3c995527e9 | |
| | 7c62b568a2 | |
| | ccf9114e84 | |
| | d8061908bb | |
| | 211e17dd43 | |
| | 6cb38a9994 | |
| | fa7df757a7 | |
| | 8c82077619 | |
| | e5d1f9e50a | |
| | 7ee50ae7b5 | |
| | de563c9da0 | |
| | 50451f2a18 | |
| | 9bc70948e1 | |
| | 5dc733f071 | |
| | bc4850908c | |
| | 20650c8654 | |
| | 56dced2670 | |
| | eef726c04b | |
| | acf1555d76 | |
| | 22e7f1a6ec | |
| | 3c49325658 | |
| | bb1cd2bea1 | |
| | fdf1f8d4ce | |
| | 117c8c6b97 | |
| | 5cef4ff09b | |
| | 91264ce572 | |
| | c79ef8e1ae | |
| | 58d915df51 | |
| | 7881a64499 | |
| | 90159f5561 | |
| | 99877772d0 | |
| | b0268cb6ce | |
| | 4edff4cfa8 | |
| | 1eac553e7e | |
| | 9d3ac7444d | |
| | 588128d054 | |
| | 8e93b9b9aa | |
| | b4bcffefa3 | |
| | 2b39af9b4f | |
| | 23fe495feb | |
| | b5dbe89bba | |
| | dbe80ca7ad | |
| | 009a3408f5 | |
| | b58e3c8918 | |
| | 56b6faf91e | |
| | 7ac1f877a7 | |
| | d55433bbfd | |
| | f0ce2bc1c5 | |
| | c3bc00b90e | |
| | ff6b7b049b | |
| | f46359121f | |
| | 37c1525c17 | |
| | c85e4cf7b4 | |
| | c66dcda287 | |
| | 6d845922ab | |
| | 2949cbe036 | |
| | c3309a7774 | |
| | 7aed837595 | |
| | 0eb799bae9 | |
| | 4baff4a4ae | |
| | 45d7bc2f8b | |
| | c0c2ddddcd | |
| | a96ed91610 | |
| | c1206423c4 | |
| | 659aa21ba1 | |
| | efd02e858a | |
| | 3bf8bc7f37 | |
| | 8ccda826d5 | |
| | b9381e43c2 | |
| | fcdea2666d | |
| | c4db377cbb | |
| | 90dc5e8693 | |
| | c81a855b0f | |
| | c8d8ec8567 | |
| | 4f879a5be0 | |
| | 1a0648b4a9 | |
| | 3c1b4669d0 | |
| | 24b3d5e538 | |
| | ab083b08ab | |
| | 89acb96927 | |
| | 79752e18b1 | |
| | 55b41c723c | |
| | 9f8928d032 | |
| | 3effa7ceaa | |
| | ed9cc2f1e0 | |
| | 975fa541c2 | |
| | 251974e44c | |
| | 38a40276ec | |
| | 57b6288358 | |
| | c3f51436bf | |
| | 0c708f11cb | |
| | fb2a706d11 | |
| | 0b76600deb | |
| | 245b612a36 | |
| | d882161d5a | |
| | d4a21e0b49 | |
| | 26a78d4bbf | |
| | 8db69786c2 | |
| | b11cec4162 | |
| | 7eeb5bef24 | |
| | 9d2032932c | |
| | 6490306017 | |
| | ceb2b7d257 | |
| | 459a53c2c2 | |
| | adc267eebf | |
| | ffe8f62d27 | |
| | ed85007039 | |
| | 5aaca50d60 | |
| | 869baf3565 | |
| | e299f6d27f | |
| | 4a192f817e | |
| | bc1d1a5a71 | |
| | 456895d9cf | |
| | 218c15ab59 | |
| | 17ab4d3b5e | |
| | 31ef0ff038 | |
| | 37e3b90d59 | |
| | 00ff8f92a5 | |
| | 4857beba3a | |
| | c1e60cc2bf | |
| | 98669ed79c | |
| | a3978a6159 | |
| | e3a9f32f52 | |
| | 87fac3238d | |
| | a2fb2a2134 | |
| | 9e8ee54553 | |
| | 117bec936c | |
| | 1547c8cc88 | |
| | 075911d48e | |
| | b21a918984 | |
| | f9b8549609 | |
| | d1b30713fb | |
| | e2ba07024f | |
| | 9b05bd42e5 | |
| | b6d3a99678 | |
| | 96d7b8873a | |
| | efc867775e | |
| | 5ab772f09c | |
| | 2a89386232 | |
| | 4d9be98dbc | |
| | 6737907826 | |
| | c060b77446 | |
| | 7e8caf30c0 | |
| | ca3e054750 | |
| | 1da1558f46 | |
| | 25c67d257c | |
| | a17d16d59c | |
| | d16076ff3e | |
| | 6c57e8a063 | |
| | db1f388878 | |
| | 0f2999fe2b | |
| | 53bfd6b24c | |
| | 5700e7792a | |
| | 38c2e5b8d5 | |
| | 48f9678a32 | |
| | beddbc2ad1 | |
| | f89197d73e | |
| | 944d65c762 | |
| | f945612bd0 | |
| | 59188de113 | |
| | 352d08e3e5 | |
| | bacb5e4f44 | |
| | 008af8660b | |
| | 886fa72324 | |
| | 2c5bae429a | |
| | f265fc1238 | |
| | 1394ce65b4 | |
| | 67ccb77197 | |
| | 63ef36e8d8 | |
| | 0b65e5d40f | |
| | 629be17af4 | |
| | fd28827864 | |
| | 8c61d9a9b1 | |
| | 975d35dbab | |
| | 8b769664c4 | |
| | 76f270a46a | |
| | 9dab1b7f28 | |
| | d3e5bbf437 | |
| | 18a25c5d78 | |
| | 924f47f7b6 | |
| | 22ff1c4a93 | |
| | 35409e1101 | |
| | 65d781128a | |
| | c35b1b07e2 | |
| | 066f6a0630 | |
| | 12ed57418c | |
| | 8b1be5cd73 | |
| | 780083dbc6 | |
| | 4919603f66 | |
| | dd26ced164 | |
| | bd2d82a5d3 | |
| | c4cd138b92 | |
| | 65697b3bf3 | |
| | 50317b111d | |
| | d7975ea287 | |
| | 714d709a31 | |
| | 11577ec054 | |
| | 79bf58f9b5 | |
| | cd8a562267 | |
| | de3ef3ed58 | |
| | 8908741806 | |
| | ba7678f9cc | |
| | a70c83768e | |
| | 04b4d394d9 | |
| | 130f12985a | |
| | 4ca5d43cd8 | |
| | 4bbf139aa7 | |
| | 47739636a9 | |
| | 407ae733ab | |
| | c39f7013e1 | |
| | a4a028323e | |
| | 780ee4e501 | |
| | d7b51547c0 | |
| | 43030f36db | |
| | 48c63f1653 | |
| | 90f479b6d5 | |
| | 6fd2957163 | |
| | d3a1c71917 | |
| | af1588c05f | |
| | 2250865fb0 | |
| | 99f770caa8 | |
| | 00122de6a9 | |
| | a70515c0fd | |
| | 398edd0689 | |
| | 6562df768d | |
| | 06769acd71 | |
| | 32dac6943d | |
| | 90834c78fe | |
| | 47917f24c4 | |
| | d614aa40e3 | |
| | bc4ba05fcb | |
| | 8d9453b9e8 | |
| | e4f320a4d0 | |
| | ef9f2ba7af | |
| | 4a3b72771f | |
| | 913f32929b | |
| | 9834872bf6 | |
| | 94a23d2a1e | |
| | 608bf69880 | |
| | 032b3df5af | |
| | 9d11a41fe4 | |
| | 2989501131 | |
| | 7b0817e8e1 | |
| | 9d4288b2d4 | |
| | 3486df383b | |
| | b60016e831 | |
| | 5aafe895fc | |
| | b853d2e155 | |
| | b7ab059084 | |
| | c91778f8c0 | |
| | 5016f3eac8 | |
| | efb1bb90a0 | |
| | 4cf393bb4b | |
| | ce4e242a6f | |
| | b27bec212f | |
| | 704519c7e3 | |
| | 6b79f40c3d | |
| | dd27fd1739 | |
| | dfa50793d8 | |
| | 2a7c35dd46 | |
| | f2ffd10bb2 | |
| | 8da531359e | |
| | e2b944cf43 | |
| | 3ec05685f7 | |
| | e103fd46ca | |
| | 877bfd69d1 | |
| | e0ef49f205 | |
| | f68cd00fe3 | |
| | ca70d215cf | |
| | d0390a0c92 | |
| | dd2535c38a | |
| | b78d180170 | |
| | 26dca1661e | |
| | f853f8594d | |
| | 8307aa73fb | |
| | d0da491e1e | |
| | 6e249060cf | |
| | fbcd7b5f83 | |
| | 9ac0a67581 | |
| | befdc8f3b6 | |
| | bb198c95e2 | |
| | c1195541b7 | |
| | 26844eb57b | |
| | a7732b672e | |
| | 677b3ce82f | |
| | fabfe17d5e | |
| | 82696d5d5d | |
| | 9eea4fb835 | |
| | 484aaeb204 | |
| | 8e589a8a47 | |
| | 2f21eb2db6 | |
| | c11529618a | |
| | 58c3c7ae38 | |
| | c8650f7ecd | |
| | 14e7543a5a | |
| | bf6705f584 | |
| | a9f53ce7ea | |
| | a45ea17042 | |
| | 4950f30890 | |
| | 7df7f00385 | |
| | d2250ea7fd | |
| | 17093b83ca | |
| | 5d8683a5cd | |
| | cede88e5bb | |
| | aadc71642a | |
| | 67d28bff12 | |
| | 7ee40b5d1c | |
| | db22af36ec | |
| | f8b5ab8cfa | |
| | 298f16f954 | |
| | 3d97cbbdaf | |
| | ce6b9a2dba | |
| | c3197e3e5c | |
| | d420d8dd1b | |
| | 3fabeaa1f4 | |
| | 35aa7098cd | |
| | 9d6192a5b8 | |
| | 76b1bd672d | |
| | 469ec9416a | |
| | 70af3439e9 | |
| | bb3c20965e | |
| | 5f59ee7942 | |
| | 8f89e68781 | |
| | 10bff13a66 | |
| | 166ff8a3c7 | |
| | b4622a328b | |
| | cc253000e4 | |
| | 42e4fcf23a | |
| | 9c63128668 | |
| | 9933b57430 | |
| | 84c92dc00f | |
| | 42154ad5bc | |
| | 96f1b0741c | |
| | bac268e243 | |
| | 3798eadccd | |
| | 2537186d43 | |
| | 0eecc6a417 | |
| | 0dc13f4c4a | |
| | f577e0ce15 | |
| | bd1b906527 | |
| | ecfef3e5bf | |
| | 3d3538e422 | |
| | 0cdad20c75 | |
| | 50144133c5 | |
| | 089cb705e8 | |
| | 525e1076ad | |
| | 282962bd36 | |
| | c93c2ab1c3 | |
| | 7b09a4d847 | |
| | 73a25b30ea | |
| | ac260dd81e | |
| | 48a2034671 | |
| | a9ce0c631e | |
| | afc7bc33cb | |
| | 168da92b9a | |
| | d70ad093af | |
| | 2a2e2770cc | |
| | 42cc71e80b | |
| | 496c19234c | |
| | 4f81667d76 | |
| | 56327689a2 | |
| | ad84831537 | |
| | 5f263296ea | |
| | 89650ea3a6 | |
| | 79f8295303 | |
| | 400e58103d | |
| | fcee8ee784 | |
| | 9148eb002b | |
| | 559e370f44 | |
| | cdeb10b5cd | |
| | e6162a90e6 | |
| | 9a6422a81e | |
| | fcea44c6d5 | |
| | 5d73273f6f | |
| | c11a0611d9 | |
| | 796495886e | |
| | fa27f667c8 | |
| | fc9713a1d2 | |
| | 62bcfa8c57 | |
| | 7f9886379c | |
| | c6e4b225b1 | |
| | 1c0f31f9f7 | |
| | 41292a3827 | |
| | 20f1be02df | |
| | a339e5cfb5 | |
| | f46f4a995b | |
| | 4ddba33f78 | |
| | e3b7aa8428 | |
| | d981cef6b9 | |
| | 6fa81ee96e | |
| | a1a337ade9 | |
| | c774b3c696 | |
| | 3e34db3170 | |
| | 317d4edfa8 | |
| | 9b12003c35 | |
| | 4ea170b8a0 | |
| | 49f2bf76a8 | |
| | 01c62591d1 | |
| | 1e91866f77 | |
| | 9656ee5d1d | |
| | a5f1e12a02 | |
| | ca9e792253 | |
| | aff24732b9 | |
| | 455fa214b6 | |
| | a9c5e5ca6e | |
| | cefcb9fde3 | |
| | bca4e93076 | |
| | 67c20aebb7 | |
| | 448711e39f | |
| | 8bf48f237d | |
| | 7c0578dc86 | |
| | 55033ffb0a | |
| | b4a9bf701a | |
| | a015dce0e2 | |
| | 28ab2e48ae | |
| | 6febd1c1df | |
| | 6350728be2 | |
| | a7c26e7338 | |
| | c880557666 | |
| | 85689a531f | |
| | cc14dfb8ec | |
| | 91d7d0b333 | |
| | 9887c9b2d6 | |
| | d2fee313ec | |
| | fa7f58e433 | |
| | 71cd2a571e | |
| | 7c094bfe2f | |
| | 0f30658329 | |
| | 31c1cf5a9d | |
| | e63fc1bed4 | |
| | efa1739b74 | |
| | 5ffecde73f | |
| | 08d13955dd | |
| | 531147dd5e | |
| | a17c95f5e4 | |
| | eadaf08c16 | |
| | 4a9c9b6fdb | |
| | b969ab48d9 | |
| | 8fa8a6299b | |
| | b2b0870b3a | |
| | 4fb757d1e0 | |
| | 241bce7aaf | |
| | 33ec2ae8d9 | |
| | c801b2051a | |
| | 7976fcac55 | |
| | e9f9a10fba | |
| | 1cdfc31e1f | |
| | 19dab5e6cc | |
| | c0f9969b9e | |
| | a0ddb8a2fa | |
| | c1d1facd06 | |
| | b26559878f | |
| | fd46a318a2 | |
| | 5d4f3985be | |
| | 360babf799 | |
| | a1b92edbb3 | |
| | 12c978739a | |
| | 4bc60dafeb | |
| | bf5b0a1bfb | |
| | bfe9de8510 | |
| | 5ecd3c6a09 | |
| | 608d11f515 | |
| | c7f8537dd9 | |
| | 723f839911 | |
| | 61224dbcdd | |
| | c3afc93a69 | |
| | 7b8af56340 | |
| | 539179f45b | |
| | 7217e148fb | |
| | d29b5e812b | |
| | 1e923b0d29 | |
| | f7e9d77f34 | |
| | 41cc67c542 | |
| | c645c7658d | |
| | b874fe2da8 | |
| | c7deaa4c74 | |
| | e6812ac99d | |
| | 719d3927d7 | |
| | 55e663a8d7 | |
| | 2c62dc26c8 | |
| | 3d4a70b821 | |
| | 4bcc7bd1f2 | |
| | f49d89ee04 | |
| | dabc127362 | |
| | c25c991809 | |
| | f45f96f8f8 | |
| | 1538eff6d8 | |
| | 00b2685b9c | |
| | 8e3e03229e | |
| | 9d8d675e0e | |
| | 933605d7e8 | |
| | b3d9ef88ec | |
| | 8958b6916c | |
| | 9fc3bef87a | |
| | d80044c235 | |
| | bc2103f3bf | |
| | f82b18efc1 | |
| | 504c668d3b | |
| | 466617f539 | |
| | 196938835a | |
| | a94e129a65 | |
| | 5d681e960d | |
| | c7b487d96b | |
| | 7dbf5ae587 | |
| | 8d0bdeba18 | |
| | 1b969041d7 | |
| | e302f9ce32 | |
| | 5a94982abe | |
| | 7115ca84aa | |
| | 04ff34ab89 | |
| | bbafbe20c2 | |
| | c4d55a33fc | |
| | 147e4aece0 | |
| | bd1488ae64 | |
| | 79fed2a4df | |
| | 304cbe981e | |
| | 3fefbf50e3 | |
| | f65c1d2be0 | |
| | aa94a6d315 | |
| | 768df74538 | |
| | 1f9da9049b | |
| | c0d0b01f0e | |
| | 7c86a5b864 | |
| | 97e302a419 | |
| | 71507a11c8 | |
| | a51e37af62 | |
| | 1fb8f09273 | |
| | 6c6db72ed4 | |
| | 0cc83dc54b | |
| | 5ce54a8205 | |
| | 8c21b7c647 | |
| | 77aa6b329d | |
| | 62d68c43ed | |
| | bfaae0a768 | |
| | e56f22ae20 | |
| | dbd1988ed9 | |
| | 4ea3be0a5c | |
| | 3e78514568 | |
| | e029b8bd43 | |
| | f5567e401c | |
| | 9b8aaeed85 | |
| | 6086d121cb | |
| | 7de6e075b4 | |
| | 946135aa2a | |
| | 42393ce234 | |
| | d6c7a367e8 | |
| | cecaaf3f58 | |
| | f09828b4e1 | |
| | 29eb517403 | |
| | 44c471c3b8 | |
| | 46374a56b2 | |
| | ec98946ef9 | |
| | fa77b742ac | |
| | 8b4e274610 | |
| | d6756d3758 | |
| | 11b68f6e1b | |
| | 88bb52ee18 | |
| | d90df974c3 | |
| | 5c541b2cb7 | |
| | 87a28127d2 | |
| | ebce53b3d8 | |
| | 83c632dc43 | |
| | ff07a05575 | |
| | f25571ffbf | |
| | f7a6892572 | |
| | 3bc2ddccc8 | |
| | 8ab470f1b2 | |
| | c8434e8316 | |
.gitignore (vendored): 2 changes
```diff
@@ -23,6 +23,8 @@ updates_key.pem
 *.vtt
 *.flv
 *.mp4
+*.m4a
+*.m4v
 *.part
 test/testdata
 .tox
```
README.md: 312 changes
```diff
@@ -16,109 +16,145 @@ which means you can modify it, redistribute it or use it however you like.
 # OPTIONS
 -h, --help print this help text and exit
 --version print program version and exit
--U, --update update this program to latest version. Make sure
-that you have sufficient permissions (run with
-sudo if needed)
+-U, --update update this program to latest version. Make
+sure that you have sufficient permissions
+(run with sudo if needed)
--i, --ignore-errors continue on download errors, for example to to
-skip unavailable videos in a playlist
+-i, --ignore-errors continue on download errors, for example to
+to skip unavailable videos in a playlist
 --abort-on-error Abort downloading of further videos (in the
-playlist or the command line) if an error occurs
+playlist or the command line) if an error
+occurs
 --dump-user-agent display the current browser identification
 --user-agent UA specify a custom user agent
---referer REF specify a custom referer, use if the video access
-is restricted to one domain
+--referer REF specify a custom referer, use if the video
+access is restricted to one domain
---list-extractors List all supported extractors and the URLs they
-would handle
+--list-extractors List all supported extractors and the URLs
+they would handle
---extractor-descriptions Output descriptions of all supported extractors
+--extractor-descriptions Output descriptions of all supported
+extractors
---proxy URL Use the specified HTTP/HTTPS proxy. Pass in an
-empty string (--proxy "") for direct connection
+--proxy URL Use the specified HTTP/HTTPS proxy. Pass in
+an empty string (--proxy "") for direct
+connection
 --no-check-certificate Suppress HTTPS certificate validation.
---cache-dir DIR Location in the filesystem where youtube-dl can
-store downloaded information permanently. By
-default $XDG_CACHE_HOME/youtube-dl or ~/.cache
-/youtube-dl .
+--cache-dir DIR Location in the filesystem where youtube-dl
+can store some downloaded information
+permanently. By default $XDG_CACHE_HOME
+/youtube-dl or ~/.cache/youtube-dl . At the
+moment, only YouTube player files (for
+videos with obfuscated signatures) are
+cached, but that may change.
 --no-cache-dir Disable filesystem caching
---bidi-workaround Work around terminals that lack bidirectional
-text support. Requires fribidi executable in PATH
+--socket-timeout None Time to wait before giving up, in seconds
+--bidi-workaround Work around terminals that lack
+bidirectional text support. Requires bidiv
+or fribidi executable in PATH
+--default-search PREFIX Use this prefix for unqualified URLs. For
+example "gvsearch2:" downloads two videos
+from google videos for youtube-dl "large
+apple". By default (with value "auto")
+youtube-dl guesses.
+--ignore-config Do not read configuration files. When given
+in the global configuration file /etc
+/youtube-dl.conf: do not read the user
+configuration in ~/.config/youtube-dl.conf
+(%APPDATA%/youtube-dl/config.txt on
+Windows)
 
 ## Video Selection:
 --playlist-start NUMBER playlist video to start at (default is 1)
 --playlist-end NUMBER playlist video to end at (default is last)
---match-title REGEX download only matching titles (regex or caseless
-sub-string)
+--match-title REGEX download only matching titles (regex or
+caseless sub-string)
 --reject-title REGEX skip download for matching titles (regex or
 caseless sub-string)
 --max-downloads NUMBER Abort after downloading NUMBER files
---min-filesize SIZE Do not download any videos smaller than SIZE
+--min-filesize SIZE Do not download any videos smaller than
+SIZE (e.g. 50k or 44.6m)
+--max-filesize SIZE Do not download any videos larger than SIZE
 (e.g. 50k or 44.6m)
---max-filesize SIZE Do not download any videos larger than SIZE (e.g.
-50k or 44.6m)
 --date DATE download only videos uploaded in this date
---datebefore DATE download only videos uploaded before this date
+--datebefore DATE download only videos uploaded on or before
+this date (i.e. inclusive)
---dateafter DATE download only videos uploaded after this date
+--dateafter DATE download only videos uploaded on or after
+this date (i.e. inclusive)
---min-views COUNT Do not download any videos with less than COUNT
-views
+--min-views COUNT Do not download any videos with less than
+COUNT views
---max-views COUNT Do not download any videos with more than COUNT
-views
+--max-views COUNT Do not download any videos with more than
+COUNT views
 --no-playlist download only the currently playing video
---age-limit YEARS download only videos suitable for the given age
+--age-limit YEARS download only videos suitable for the given
+age
---download-archive FILE Download only videos not listed in the archive
-file. Record the IDs of all downloaded videos in
-it.
+--download-archive FILE Download only videos not listed in the
+archive file. Record the IDs of all
+downloaded videos in it.
+--include-ads Download advertisements as well
+(experimental)
+--youtube-include-dash-manifest Try to download the DASH manifest on
+YouTube videos (experimental)
 
 ## Download Options:
--r, --rate-limit LIMIT maximum download rate in bytes per second (e.g.
-50K or 4.2M)
+-r, --rate-limit LIMIT maximum download rate in bytes per second
+(e.g. 50K or 4.2M)
 -R, --retries RETRIES number of retries (default is 10)
 --buffer-size SIZE size of download buffer (e.g. 1024 or 16K)
 (default is 1024)
---no-resize-buffer do not automatically adjust the buffer size. By
-default, the buffer size is automatically resized
-from an initial value of SIZE.
+--no-resize-buffer do not automatically adjust the buffer
+size. By default, the buffer size is
+automatically resized from an initial value
+of SIZE.
 
 ## Filesystem Options:
 -t, --title use title in file name (default)
 --id use only video ID in file name
 -l, --literal [deprecated] alias of --title
 -A, --auto-number number downloaded files starting from 00000
--o, --output TEMPLATE output filename template. Use %(title)s to get
-the title, %(uploader)s for the uploader name,
-%(uploader_id)s for the uploader nickname if
-different, %(autonumber)s to get an automatically
-incremented number, %(ext)s for the filename
-extension, %(format)s for the format description
-(like "22 - 1280x720" or "HD"),%(format_id)s for
-the unique id of the format (like Youtube's
-itags: "137"),%(upload_date)s for the upload date
+-o, --output TEMPLATE output filename template. Use %(title)s to
+get the title, %(uploader)s for the
+uploader name, %(uploader_id)s for the
+uploader nickname if different,
+%(autonumber)s to get an automatically
+incremented number, %(ext)s for the
+filename extension, %(format)s for the
+format description (like "22 - 1280x720" or
+"HD"), %(format_id)s for the unique id of
+the format (like Youtube's itags: "137"),
+%(upload_date)s for the upload date
 (YYYYMMDD), %(extractor)s for the provider
-(youtube, metacafe, etc), %(id)s for the video id
-, %(playlist)s for the playlist the video is in,
-%(playlist_index)s for the position in the
-playlist and %% for a literal percent. Use - to
-output to stdout. Can also be used to download to
-a different directory, for example with -o '/my/d
-ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+(youtube, metacafe, etc), %(id)s for the
+video id, %(playlist)s for the playlist the
+video is in, %(playlist_index)s for the
+position in the playlist and %% for a
+literal percent. Use - to output to stdout.
+Can also be used to download to a different
+directory, for example with -o '/my/downloa
+ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
---autonumber-size NUMBER Specifies the number of digits in %(autonumber)s
-when it is present in output filename template or
---auto-number option is given
+--autonumber-size NUMBER Specifies the number of digits in
+%(autonumber)s when it is present in output
+filename template or --auto-number option
+is given
---restrict-filenames Restrict filenames to only ASCII characters, and
-avoid "&" and spaces in filenames
+--restrict-filenames Restrict filenames to only ASCII
+characters, and avoid "&" and spaces in
+filenames
--a, --batch-file FILE file containing URLs to download ('-' for stdin)
+-a, --batch-file FILE file containing URLs to download ('-' for
+stdin)
 --load-info FILE json file containing the video information
-(created with the "--write-json" option
+(created with the "--write-json" option)
 -w, --no-overwrites do not overwrite files
--c, --continue force resume of partially downloaded files. By
-default, youtube-dl will resume downloads if
-possible.
+-c, --continue force resume of partially downloaded files.
+By default, youtube-dl will resume
+downloads if possible.
---no-continue do not resume partially downloaded files (restart
-from beginning)
+--no-continue do not resume partially downloaded files
+(restart from beginning)
---cookies FILE file to read cookies from and dump cookie jar in
+--cookies FILE file to read cookies from and dump cookie
+jar in
 --no-part do not use .part files
---no-mtime do not use the Last-modified header to set the
-file modification time
+--no-mtime do not use the Last-modified header to set
+the file modification time
---write-description write video description to a .description file
+--write-description write video description to a .description
+file
 --write-info-json write video metadata to a .info.json file
---write-annotations write video annotations to a .annotation file
+--write-annotations write video annotations to a .annotation
+file
 --write-thumbnail write thumbnail image to disk
 
 ## Verbosity / Simulation Options:
@@ -139,57 +175,74 @@ which means you can modify it, redistribute it or use it however you like.
 --no-progress do not print progress bar
 --console-title display progress in console titlebar
 -v, --verbose print various debugging information
---dump-intermediate-pages print downloaded pages to debug problems(very
-verbose)
+--dump-intermediate-pages print downloaded pages to debug problems
+(very verbose)
---write-pages Write downloaded intermediary pages to files in
-the current directory to debug problems
+--write-pages Write downloaded intermediary pages to
+files in the current directory to debug
+problems
+--print-traffic Display sent and read HTTP traffic
 
 ## Video Format Options:
 -f, --format FORMAT video format code, specify the order of
-preference using slashes: "-f 22/17/18". "-f mp4"
-and "-f flv" are also supported
+preference using slashes: "-f 22/17/18".
+"-f mp4" and "-f flv" are also supported.
+You can also use the special names "best",
+"bestaudio", "worst", and "worstaudio". By
+default, youtube-dl will pick the best
+quality.
 --all-formats download all available video formats
---prefer-free-formats prefer free video formats unless a specific one
-is requested
+--prefer-free-formats prefer free video formats unless a specific
+one is requested
 --max-quality FORMAT highest quality format to download
--F, --list-formats list all available formats (currently youtube
-only)
+-F, --list-formats list all available formats
 
 ## Subtitle Options:
 --write-sub write subtitle file
---write-auto-sub write automatic subtitle file (youtube only)
---all-subs downloads all the available subtitles of the
-video
---list-subs lists all available subtitles for the video
---sub-format FORMAT subtitle format (default=srt) ([sbv/vtt] youtube
+--write-auto-sub write automatic subtitle file (youtube
 only)
---sub-lang LANGS languages of the subtitles to download (optional)
-separated by commas, use IETF language tags like
-'en,pt'
+--all-subs downloads all the available subtitles of
+the video
+--list-subs lists all available subtitles for the video
+--sub-format FORMAT subtitle format (default=srt) ([sbv/vtt]
+youtube only)
+--sub-lang LANGS languages of the subtitles to download
+(optional) separated by commas, use IETF
+language tags like 'en,pt'
 
 ## Authentication Options:
 -u, --username USERNAME account username
 -p, --password PASSWORD account password
 -n, --netrc use .netrc authentication data
---video-password PASSWORD video password (vimeo only)
+--video-password PASSWORD video password (vimeo, smotri)
 
 ## Post-processing Options:
--x, --extract-audio convert video files to audio-only files (requires
-ffmpeg or avconv and ffprobe or avprobe)
+-x, --extract-audio convert video files to audio-only files
+(requires ffmpeg or avconv and ffprobe or
+avprobe)
---audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
-"wav"; best by default
+--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
+"opus", or "wav"; best by default
---audio-quality QUALITY ffmpeg/avconv audio quality specification, insert
-a value between 0 (better) and 9 (worse) for VBR
-or a specific bitrate like 128K (default 5)
+--audio-quality QUALITY ffmpeg/avconv audio quality specification,
+insert a value between 0 (better) and 9
+(worse) for VBR or a specific bitrate like
+128K (default 5)
---recode-video FORMAT Encode the video to another format if necessary
-(currently supported: mp4|flv|ogg|webm)
+--recode-video FORMAT Encode the video to another format if
+necessary (currently supported:
+mp4|flv|ogg|webm)
--k, --keep-video keeps the video file on disk after the post-
-processing; the video is erased by default
+-k, --keep-video keeps the video file on disk after the
+post-processing; the video is erased by
+default
---no-post-overwrites do not overwrite post-processed files; the post-
-processed files are overwritten by default
+--no-post-overwrites do not overwrite post-processed files; the
+post-processed files are overwritten by
+default
 --embed-subs embed subtitles in the video (only for mp4
 videos)
---add-metadata add metadata to the files
+--add-metadata write metadata to the video file
+--xattrs write metadata to the video file's xattrs
+(using dublin core and xdg standards)
+--prefer-avconv Prefer avconv over ffmpeg for running the
+postprocessors (default)
+--prefer-ffmpeg Prefer ffmpeg over avconv for running the
+postprocessors
 
 # CONFIGURATION
 
@@ -228,9 +281,14 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb
 
 Examples:
 
-$ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months
-$ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970
-$ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010
+# Download only the videos uploaded in the last 6 months
+$ youtube-dl --dateafter now-6months
+
+# Download only the videos uploaded on January 1, 1970
+$ youtube-dl --date 19700101
+
+$ # will only download the videos uploaded in the 200x decade
+$ youtube-dl --dateafter 20000101 --datebefore 20091231
 
 # FAQ
 
@@ -275,11 +333,31 @@ Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unz
 
 To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29).
 
-# COPYRIGHT
+# DEVELOPER INSTRUCTIONS
 
-youtube-dl is released into the public domain by the copyright holders.
+Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.
 
-This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.
+To run youtube-dl as a developer, you don't need to build anything either. Simply execute
+
+    python -m youtube_dl
+
+To run the test, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
+
+    python -m unittest discover
+    python test/test_download.py
+    nosetests
+
+If you want to create a build of youtube-dl yourself, you'll need
+
+* python
+* make
+* pandoc
+* zip
+* nosetests
+
+### Adding support for a new site
+
+If you want to add support for a new site, copy *any* [recently modified](https://github.com/rg3/youtube-dl/commits/master/youtube_dl/extractor) file in `youtube_dl/extractor`, add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py). Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Don't forget to run the tests with `python test/test_download.py Test_Download.test_YourExtractor`! For a detailed tutorial, refer to [this blog post](http://filippo.io/add-support-for-a-new-video-site-to-youtube-dl/).
 
 # BUGS
 
@@ -309,7 +387,7 @@ Site support requests must contain an example URL. An example URL is a URL you m
 
 ### Are you using the latest version?
 
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. Ábout 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
 
 ### Is the issue already documented?
 
@@ -334,3 +412,13 @@ In particular, every site support request issue should only pertain to services
 ### Is anyone going to need the feature?
 
 Only post features that you (or an incapicated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
+
+### Is your question about youtube-dl?
+
+It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
+
+# COPYRIGHT
+
+youtube-dl is released into the public domain by the copyright holders.
+
+This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.
```
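The new `best`/`bestaudio` special names documented above are also usable when embedding youtube-dl rather than calling the CLI. A minimal sketch (the URL is a placeholder; the parameter names mirror the CLI flags):

```python
# Minimal sketch of driving youtube-dl from Python instead of the CLI.
# 'format' takes the same values as -f, including the new special names;
# 'ignoreerrors' corresponds to -i / --ignore-errors.
import youtube_dl

ydl = youtube_dl.YoutubeDL({
    'format': 'bestaudio',  # or 'best', 'worst', 'worstaudio'
    'ignoreerrors': True,
})
ydl.download(['http://example.com/watch?v=placeholder'])  # placeholder URL
```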
```diff
@@ -6,7 +6,7 @@ __youtube_dl()
 prev="${COMP_WORDS[COMP_CWORD-1]}"
 opts="{{flags}}"
 keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
-fileopts="-a|--batch-file|--download-archive|--cookies"
+fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
 diropts="--cache-dir"
 
 if [[ ${prev} =~ ${fileopts} ]]; then
```
```diff
@@ -3,6 +3,9 @@
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
 if we are not 'age_limit' tagging some porn site
+
+A second approach implemented relies on a list of porn domains, to activate it
+pass the list filename as the only argument
 """
 
 # Allow direct execution
@@ -11,9 +14,17 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import get_testcases
+from youtube_dl.utils import compat_urllib_parse_urlparse
 from youtube_dl.utils import compat_urllib_request
 
+if len(sys.argv) > 1:
+    METHOD = 'LIST'
+    LIST = open(sys.argv[1]).read().decode('utf8').strip()
+else:
+    METHOD = 'EURISTIC'
+
 for test in get_testcases():
+    if METHOD == 'EURISTIC':
         try:
             webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
         except:
@@ -22,14 +33,23 @@ for test in get_testcases():
 
         webpage = webpage.decode('utf8', 'replace')
 
-    if 'porn' in webpage.lower() and ('info_dict' not in test
-                                      or 'age_limit' not in test['info_dict']
+        RESULT = 'porn' in webpage.lower()
+
+    elif METHOD == 'LIST':
+        domain = compat_urllib_parse_urlparse(test['url']).netloc
+        if not domain:
+            print('\nFail: {0}'.format(test['name']))
+            continue
+        domain = '.'.join(domain.split('.')[-2:])
+
+        RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)
+
+    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
                    or test['info_dict']['age_limit'] != 18):
         print('\nPotential missing age_limit check: {0}'.format(test['name']))
 
-    elif 'porn' not in webpage.lower() and ('info_dict' in test and
-                                            'age_limit' in test['info_dict'] and
-                                            test['info_dict']['age_limit'] == 18):
+    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
+                         and test['info_dict']['age_limit'] == 18):
         print('\nPotential false negative: {0}'.format(test['name']))
 
     else:
```
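A quick illustration of the domain normalization the new LIST method relies on: only the last two labels of the URL's host are kept, so any subdomain matches a bare domain entry in the list. The URL below is a placeholder, and the stdlib `urlparse` stands in for `compat_urllib_parse_urlparse`:

```python
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse  # Python 2

# Extract the host, then keep only its last two dot-separated labels.
netloc = urlparse('http://www.videos.example.com/clip/123').netloc
domain = '.'.join(netloc.split('.')[-2:])
print(domain)  # -> example.com
```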
```diff
@@ -1,56 +1,76 @@
 #!/usr/bin/env python3
 
 import datetime
+import io
+import json
 import textwrap
 
-import json
-
 
 atom_template = textwrap.dedent("""\
-    <?xml version='1.0' encoding='utf-8'?>
+    <?xml version="1.0" encoding="utf-8"?>
-    <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
+    <feed xmlns="http://www.w3.org/2005/Atom">
-    <atom:title>youtube-dl releases</atom:title>
+    <link rel="self" href="http://rg3.github.io/youtube-dl/update/releases.atom" />
-    <atom:id>youtube-dl-updates-feed</atom:id>
+    <title>youtube-dl releases</title>
-    <atom:updated>@TIMESTAMP@</atom:updated>
+    <id>https://yt-dl.org/feed/youtube-dl-updates-feed</id>
+    <updated>@TIMESTAMP@</updated>
     @ENTRIES@
-    </atom:feed>""")
+    </feed>""")
 
 entry_template = textwrap.dedent("""
-    <atom:entry>
+    <entry>
-    <atom:id>youtube-dl-@VERSION@</atom:id>
+    <id>https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@</id>
-    <atom:title>New version @VERSION@</atom:title>
+    <title>New version @VERSION@</title>
-    <atom:link href="http://rg3.github.io/youtube-dl" />
+    <link href="http://rg3.github.io/youtube-dl" />
-    <atom:content type="xhtml">
+    <content type="xhtml">
     <div xmlns="http://www.w3.org/1999/xhtml">
     Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
     </div>
-    </atom:content>
+    </content>
-    <atom:author>
+    <author>
-    <atom:name>The youtube-dl maintainers</atom:name>
+    <name>The youtube-dl maintainers</name>
-    </atom:author>
+    </author>
-    <atom:updated>@TIMESTAMP@</atom:updated>
+    <updated>@TIMESTAMP@</updated>
-    </atom:entry>
+    </entry>
 """)
 
 now = datetime.datetime.now()
-now_iso = now.isoformat()
+now_iso = now.isoformat() + 'Z'
 
 atom_template = atom_template.replace('@TIMESTAMP@', now_iso)
 
-entries=[]
-
 versions_info = json.load(open('update/versions.json'))
 versions = list(versions_info['versions'].keys())
 versions.sort()
 
+entries = []
 for v in versions:
-    entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-'))
+    fields = v.split('.')
+    year, month, day = map(int, fields[:3])
+    faked = 0
+    patchlevel = 0
+    while True:
+        try:
+            datetime.date(year, month, day)
+        except ValueError:
+            day -= 1
+            faked += 1
+            assert day > 0
+            continue
+        break
+    if len(fields) >= 4:
+        try:
+            patchlevel = int(fields[3])
+        except ValueError:
+            patchlevel = 1
+    timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel)
+
+    entry = entry_template.replace('@TIMESTAMP@', timestamp)
     entry = entry.replace('@VERSION@', v)
     entries.append(entry)
 
 entries_str = textwrap.indent(''.join(entries), '\t')
 atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
-with open('update/releases.atom','w',encoding='utf-8') as atom_file:
+with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
```
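A worked example of the new timestamp logic above: a version string is parsed as a date, and if it is not a valid calendar date the day is walked back until it is, with the number of corrections (`faked`) and any fourth patchlevel field folded into the minutes and seconds, so every version still gets a distinct, ordered `<updated>` value. Assuming a hypothetical version `2013.02.30.1`:

```python
import datetime

fields = '2013.02.30.1'.split('.')
year, month, day = map(int, fields[:3])
faked = 0
while True:
    try:
        datetime.date(year, month, day)  # 2013-02-30 and 2013-02-29 are invalid
    except ValueError:
        day -= 1
        faked += 1
        continue
    break
patchlevel = int(fields[3]) if len(fields) >= 4 else 0
print('%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel))
# -> 2013-02-28T00:02:01Z
```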
```diff
@@ -1,10 +1,14 @@
+import io
 import sys
 import re
 
 README_FILE = 'README.md'
 helptext = sys.stdin.read()
 
-with open(README_FILE) as f:
+if isinstance(helptext, bytes):
+    helptext = helptext.decode('utf-8')
+
+with io.open(README_FILE, encoding='utf-8') as f:
     oldreadme = f.read()
 
 header = oldreadme[:oldreadme.index('# OPTIONS')]
@@ -14,7 +18,7 @@ options = helptext[helptext.index(' General Options:')+19:]
 options = re.sub(r'^ (\w.+)$', r'## \1', options, flags=re.M)
 options = '# OPTIONS\n' + options + '\n'
 
-with open(README_FILE, 'w') as f:
+with io.open(README_FILE, 'w', encoding='utf-8') as f:
     f.write(header)
     f.write(options)
     f.write(footer)
```
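The point of switching to `io.open` here is Python 2/3 text handling; a minimal check (run from the repository root, where README.md lives):

```python
import io

with io.open('README.md', encoding='utf-8') as f:
    text = f.read()
# io.open decodes to text on both major versions: unicode on Python 2,
# str on Python 3, so the README pipeline never mixes bytes into its output.
assert isinstance(text, type(u''))
```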
```diff
@@ -24,6 +24,8 @@ if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.0
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
 if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
+useless_files=$(find youtube_dl -type f -not -name '*.py')
+if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
 if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
 
 /bin/echo -e "\n### First of all, testing..."
```
setup.py: 23 changes
```diff
@@ -3,7 +3,9 @@
 
 from __future__ import print_function
 
+import os.path
 import pkg_resources
+import warnings
 import sys
 
 try:
@@ -44,12 +46,24 @@ py2exe_params = {
 if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
     params = py2exe_params
 else:
-    params = {
-        'data_files': [ # Installing system-wide would require sudo...
+    files_spec = [
         ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
         ('share/doc/youtube_dl', ['README.txt']),
         ('share/man/man1', ['youtube-dl.1'])
     ]
+    root = os.path.dirname(os.path.abspath(__file__))
+    data_files = []
+    for dirname, files in files_spec:
+        resfiles = []
+        for fn in files:
+            if not os.path.exists(fn):
+                warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
+            else:
+                resfiles.append(fn)
+        data_files.append((dirname, resfiles))
+
+    params = {
+        'data_files': data_files,
     }
 if setuptools_available:
     params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
@@ -71,7 +85,10 @@ setup(
     author_email='ytdl@yt-dl.org',
     maintainer='Philipp Hagemeister',
     maintainer_email='phihag@phihag.de',
-    packages=['youtube_dl', 'youtube_dl.extractor'],
+    packages=[
+        'youtube_dl',
+        'youtube_dl.extractor', 'youtube_dl.downloader',
+        'youtube_dl.postprocessor'],
 
     # Provokes warning on most systems (why?!)
     # test_suite = 'nose.collector',
```
```diff
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -8,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import FakeYDL
 from youtube_dl import YoutubeDL
+from youtube_dl.extractor import YoutubeIE
 
 
 class YDL(FakeYDL):
@@ -29,105 +32,191 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
-            {u'ext': u'webm', u'height': 460},
-            {u'ext': u'mp4', u'height': 460},
+            {'ext': 'webm', 'height': 460},
+            {'ext': 'mp4', 'height': 460},
         ]
-        info_dict = {u'formats': formats, u'extractor': u'test'}
+        info_dict = {'formats': formats, 'extractor': 'test'}
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded[u'ext'], u'webm')
+        self.assertEqual(downloaded['ext'], 'webm')
 
         # Different resolution => download best quality (mp4)
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
-            {u'ext': u'webm', u'height': 720},
-            {u'ext': u'mp4', u'height': 1080},
+            {'ext': 'webm', 'height': 720},
+            {'ext': 'mp4', 'height': 1080},
         ]
-        info_dict[u'formats'] = formats
+        info_dict['formats'] = formats
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded[u'ext'], u'mp4')
+        self.assertEqual(downloaded['ext'], 'mp4')
 
-        # No prefer_free_formats => keep original formats order
+        # No prefer_free_formats => prefer mp4 and flv for greater compatibilty
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
-            {u'ext': u'webm', u'height': 720},
-            {u'ext': u'flv', u'height': 720},
+            {'ext': 'webm', 'height': 720},
+            {'ext': 'mp4', 'height': 720},
+            {'ext': 'flv', 'height': 720},
         ]
-        info_dict[u'formats'] = formats
+        info_dict['formats'] = formats
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded[u'ext'], u'flv')
+        self.assertEqual(downloaded['ext'], 'mp4')
+
+        ydl = YDL()
+        ydl.params['prefer_free_formats'] = False
+        formats = [
+            {'ext': 'flv', 'height': 720},
+            {'ext': 'webm', 'height': 720},
+        ]
+        info_dict['formats'] = formats
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['ext'], 'flv')
 
     def test_format_limit(self):
         formats = [
-            {u'format_id': u'meh', u'url': u'http://example.com/meh'},
-            {u'format_id': u'good', u'url': u'http://example.com/good'},
-            {u'format_id': u'great', u'url': u'http://example.com/great'},
-            {u'format_id': u'excellent', u'url': u'http://example.com/exc'},
+            {'format_id': 'meh', 'url': 'http://example.com/meh', 'preference': 1},
+            {'format_id': 'good', 'url': 'http://example.com/good', 'preference': 2},
+            {'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3},
+            {'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4},
         ]
         info_dict = {
-            u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
+            'formats': formats, 'extractor': 'test', 'id': 'testvid'}
 
         ydl = YDL()
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded[u'format_id'], u'excellent')
+        self.assertEqual(downloaded['format_id'], 'excellent')
 
         ydl = YDL({'format_limit': 'good'})
         assert ydl.params['format_limit'] == 'good'
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded[u'format_id'], u'good')
+        self.assertEqual(downloaded['format_id'], 'good')
 
         ydl = YDL({'format_limit': 'great', 'format': 'all'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
-        self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
+        self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'meh')
-        self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
+        self.assertEqual(ydl.downloaded_info_dicts[1]['format_id'], 'good')
-        self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
+        self.assertEqual(ydl.downloaded_info_dicts[2]['format_id'], 'great')
         self.assertTrue('3' in ydl.msgs[0])
 
         ydl = YDL()
         ydl.params['format_limit'] = 'excellent'
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded[u'format_id'], u'excellent')
+        self.assertEqual(downloaded['format_id'], 'excellent')
 
     def test_format_selection(self):
         formats = [
-            {u'format_id': u'35', u'ext': u'mp4'},
-            {u'format_id': u'45', u'ext': u'webm'},
-            {u'format_id': u'47', u'ext': u'webm'},
-            {u'format_id': u'2', u'ext': u'flv'},
+            {'format_id': '35', 'ext': 'mp4', 'preference': 1},
+            {'format_id': '45', 'ext': 'webm', 'preference': 2},
+            {'format_id': '47', 'ext': 'webm', 'preference': 3},
+            {'format_id': '2', 'ext': 'flv', 'preference': 4},
         ]
-        info_dict = {u'formats': formats, u'extractor': u'test'}
+        info_dict = {'formats': formats, 'extractor': 'test'}
 
-        ydl = YDL({'format': u'20/47'})
+        ydl = YDL({'format': '20/47'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded['format_id'], u'47')
+        self.assertEqual(downloaded['format_id'], '47')
 
-        ydl = YDL({'format': u'20/71/worst'})
+        ydl = YDL({'format': '20/71/worst'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded['format_id'], u'35')
+        self.assertEqual(downloaded['format_id'], '35')
 
         ydl = YDL()
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded['format_id'], u'2')
+        self.assertEqual(downloaded['format_id'], '2')
 
-        ydl = YDL({'format': u'webm/mp4'})
+        ydl = YDL({'format': 'webm/mp4'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
-        self.assertEqual(downloaded['format_id'], u'47')
+        self.assertEqual(downloaded['format_id'], '47')
 
-        ydl = YDL({'format': u'3gp/40/mp4'})
+        ydl = YDL({'format': '3gp/40/mp4'})
+        ydl.process_ie_result(info_dict.copy())
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], '35')
+
+    def test_format_selection_audio(self):
+        formats = [
+            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none'},
+            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none'},
```
|
||||||
|
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none'},
|
||||||
|
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4},
|
||||||
|
]
|
||||||
|
info_dict = {'formats': formats, 'extractor': 'test'}
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'bestaudio'})
|
||||||
|
ydl.process_ie_result(info_dict.copy())
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], 'audio-high')
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'worstaudio'})
|
||||||
|
ydl.process_ie_result(info_dict.copy())
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], 'audio-low')
|
||||||
|
|
||||||
|
formats = [
|
||||||
|
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1},
|
||||||
|
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2},
|
||||||
|
]
|
||||||
|
info_dict = {'formats': formats, 'extractor': 'test'}
|
||||||
|
|
||||||
|
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
|
||||||
|
ydl.process_ie_result(info_dict.copy())
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], 'vid-high')
|
||||||
|
|
||||||
|
def test_youtube_format_selection(self):
|
||||||
|
order = [
|
||||||
|
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
|
||||||
|
# Apple HTTP Live Streaming
|
||||||
|
'96', '95', '94', '93', '92', '132', '151',
|
||||||
|
# 3D
|
||||||
|
'85', '84', '102', '83', '101', '82', '100',
|
||||||
|
# Dash video
|
||||||
|
'138', '137', '248', '136', '247', '135', '246',
|
||||||
|
'245', '244', '134', '243', '133', '242', '160',
|
||||||
|
# Dash audio
|
||||||
|
'141', '172', '140', '139', '171',
|
||||||
|
]
|
||||||
|
|
||||||
|
for f1id, f2id in zip(order, order[1:]):
|
||||||
|
f1 = YoutubeIE._formats[f1id].copy()
|
||||||
|
f1['format_id'] = f1id
|
||||||
|
f2 = YoutubeIE._formats[f2id].copy()
|
||||||
|
f2['format_id'] = f2id
|
||||||
|
|
||||||
|
info_dict = {'formats': [f1, f2], 'extractor': 'youtube'}
|
||||||
|
ydl = YDL()
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
ydl.process_ie_result(info_dict)
|
ydl.process_ie_result(info_dict)
|
||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], u'35')
|
self.assertEqual(downloaded['format_id'], f1id)
|
||||||
|
|
||||||
|
info_dict = {'formats': [f2, f1], 'extractor': 'youtube'}
|
||||||
|
ydl = YDL()
|
||||||
|
yie = YoutubeIE(ydl)
|
||||||
|
yie._sort_formats(info_dict['formats'])
|
||||||
|
ydl.process_ie_result(info_dict)
|
||||||
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
self.assertEqual(downloaded['format_id'], f1id)
|
||||||
|
|
||||||
def test_add_extra_info(self):
|
def test_add_extra_info(self):
|
||||||
test_dict = {
|
test_dict = {
|
||||||
@@ -143,17 +232,17 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
|
|
||||||
def test_prepare_filename(self):
|
def test_prepare_filename(self):
|
||||||
info = {
|
info = {
|
||||||
u'id': u'1234',
|
'id': '1234',
|
||||||
u'ext': u'mp4',
|
'ext': 'mp4',
|
||||||
u'width': None,
|
'width': None,
|
||||||
}
|
}
|
||||||
def fname(templ):
|
def fname(templ):
|
||||||
ydl = YoutubeDL({'outtmpl': templ})
|
ydl = YoutubeDL({'outtmpl': templ})
|
||||||
return ydl.prepare_filename(info)
|
return ydl.prepare_filename(info)
|
||||||
self.assertEqual(fname(u'%(id)s.%(ext)s'), u'1234.mp4')
|
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
|
||||||
self.assertEqual(fname(u'%(id)s-%(width)s.%(ext)s'), u'1234-NA.mp4')
|
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
|
||||||
# Replace missing fields with 'NA'
|
# Replace missing fields with 'NA'
|
||||||
self.assertEqual(fname(u'%(uploader_date)s-%(id)s.%(ext)s'), u'NA-1234.mp4')
|
self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
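The specs exercised above ('20/47', '3gp/40/mp4', 'webm/mp4') are slash-separated fallback lists: each alternative is tried in order and may name either a format_id or an extension, while 'best' and 'worst' resolve against the sorted formats list. A minimal sketch of those semantics, not youtube-dl's actual selection code (the function name and structure here are illustrative):

def select_format(spec, formats):
    # `formats` is assumed sorted from worst to best, as after _sort_formats.
    for alternative in spec.split('/'):
        if alternative == 'best':
            return formats[-1]
        if alternative == 'worst':
            return formats[0]
        for f in reversed(formats):  # prefer the better of several matches
            if f.get('format_id') == alternative or f.get('ext') == alternative:
                return f
    return None

formats = [
    {'format_id': '35', 'ext': 'mp4', 'preference': 1},
    {'format_id': '45', 'ext': 'webm', 'preference': 2},
    {'format_id': '47', 'ext': 'webm', 'preference': 3},
    {'format_id': '2', 'ext': 'flv', 'preference': 4},
]
assert select_format('20/47', formats)['format_id'] == '47'
assert select_format('3gp/40/mp4', formats)['format_id'] == '35'
assert select_format('webm/mp4', formats)['format_id'] == '47'

These asserts reproduce the expectations of the tests above: an unknown alternative is skipped and the first one that matches anything wins.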
test/test_all_urls.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python

+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -13,6 +15,7 @@ from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
     JustinTVIE,
+    PBSIE,
     YoutubeIE,
 )

@@ -29,18 +32,20 @@ class TestAllURLsMatching(unittest.TestCase):

     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
-        assertPlaylist(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist(u'UUBABnxM4Ar9ten8Mdjj1j0Q') #585
-        assertPlaylist(u'PL63F0C78739B09958')
-        assertPlaylist(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
-        assertPlaylist(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
-        self.assertFalse('youtube:playlist' in self.matching_ies(u'PLtS2H6bU1M'))
+        assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('PL63F0C78739B09958')
+        assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
+        # Top tracks
+        assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')

     def test_youtube_matching(self):
-        self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -63,6 +68,9 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_show_matching(self):
         self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])

+    def test_youtube_truncated(self):
+        self.assertMatch('http://www.youtube.com/watch?', ['youtube:truncated_url'])
+
     def test_justin_tv_channelid_matching(self):
         self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
         self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))
@@ -80,7 +88,7 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))

     def test_youtube_extract(self):
-        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE()._extract_id(url), id)
+        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
@@ -89,7 +97,7 @@ class TestAllURLsMatching(unittest.TestCase):
         assertExtractId('BaW_jenozKc', 'BaW_jenozKc')

     def test_facebook_matching(self):
-        self.assertTrue(FacebookIE.suitable(u'https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
+        self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))

     def test_no_duplicates(self):
         ies = gen_extractors()
@@ -113,10 +121,20 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_vimeo_matching(self):
         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
         self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
+        self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user'])
+        self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])

     # https://github.com/rg3/youtube-dl/issues/1930
     def test_soundcloud_not_matching_sets(self):
         self.assertMatch('http://soundcloud.com/floex/sets/gone-ep', ['soundcloud:set'])

+    def test_tumblr(self):
+        self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', ['Tumblr'])
+        self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430', ['Tumblr'])
+
+    def test_pbs(self):
+        # https://github.com/rg3/youtube-dl/issues/2350
+        self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
+
 if __name__ == '__main__':
     unittest.main()
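assertMatch checks which extractors claim a URL: each extractor declares a _VALID_URL regex, and suitable() reports whether it matches. A reduced sketch of that mechanism with a hypothetical mini-extractor (the class and pattern below are illustrative, not the real ones):

import re

class InfoExtractor(object):
    _VALID_URL = None

    @classmethod
    def suitable(cls, url):
        # An extractor claims a URL when its _VALID_URL regex matches.
        return re.match(cls._VALID_URL, url) is not None

class TumblrIE(InfoExtractor):  # illustrative stand-in only
    _VALID_URL = r'http://.*\.tumblr\.com/post/\d+'

assert TumblrIE.suitable('http://tatianamaslanydaily.tumblr.com/post/54196191430')
assert not TumblrIE.suitable('http://example.com/watch?v=xyz')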
test/test_download.py
@@ -22,6 +22,7 @@ import socket

 import youtube_dl.YoutubeDL
 from youtube_dl.utils import (
+    compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_HTTPError,
@@ -90,7 +91,7 @@ def generator(test_case):
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
-        ydl.fd.add_progress_hook(_hook)
+        ydl.add_progress_hook(_hook)

         def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
@@ -110,7 +111,7 @@ def generator(test_case):
                 ydl.download([test_case['url']])
             except (DownloadError, ExtractorError) as err:
                 # Check if the exception is not a network related one
-                if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
+                if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                     raise

                 if try_num == RETRIES:
@@ -148,7 +149,7 @@ def generator(test_case):
                 for key, value in info_dict.items()
                 if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
             if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
-                sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n')
+                sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')

             # Check for the presence of mandatory fields
             for key in ('id', 'url', 'title', 'ext'):
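The second hunk widens the set of exceptions the test harness treats as transient: compat_http_client.BadStatusLine now triggers a retry instead of a hard failure. The retry pattern is roughly the following sketch, where download and the transient tuple stand in for the harness's own pieces:

import socket

RETRIES = 3

def download_with_retries(download, transient=(socket.timeout,)):
    # Retry `download()` on whitelisted transient network errors and
    # re-raise anything else (or the last failed attempt) immediately.
    for try_num in range(1, RETRIES + 1):
        try:
            return download()
        except transient:
            if try_num == RETRIES:
                raise  # give up after the last attempt
            # otherwise fall through and retry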
test/test_playlists.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # encoding: utf-8

+from __future__ import unicode_literals

 # Allow direct execution
 import os
@@ -12,6 +13,7 @@ from test.helper import FakeYDL


 from youtube_dl.extractor import (
+    AcademicEarthCourseIE,
     DailymotionPlaylistIE,
     DailymotionUserIE,
     VimeoChannelIE,
@@ -26,7 +28,14 @@ from youtube_dl.extractor import (
     BambuserChannelIE,
     BandcampAlbumIE,
     SmotriCommunityIE,
-    SmotriUserIE
+    SmotriUserIE,
+    IviCompilationIE,
+    ImdbListIE,
+    KhanAcademyIE,
+    EveryonesMixtapeIE,
+    RutubeChannelIE,
+    GoogleSearchIE,
+    GenericIE,
 )


@@ -40,23 +49,23 @@ class TestPlaylists(unittest.TestCase):
         ie = DailymotionPlaylistIE(dl)
         result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'SPORT')
+        self.assertEqual(result['title'], 'SPORT')
         self.assertTrue(len(result['entries']) > 20)

     def test_dailymotion_user(self):
         dl = FakeYDL()
         ie = DailymotionUserIE(dl)
-        result = ie.extract('http://www.dailymotion.com/user/generation-quoi/')
+        result = ie.extract('https://www.dailymotion.com/user/nqtv')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Génération Quoi')
-        self.assertTrue(len(result['entries']) >= 26)
+        self.assertEqual(result['title'], 'Rémi Gaillard')
+        self.assertTrue(len(result['entries']) >= 100)

     def test_vimeo_channel(self):
         dl = FakeYDL()
         ie = VimeoChannelIE(dl)
         result = ie.extract('http://vimeo.com/channels/tributes')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Vimeo Tributes')
+        self.assertEqual(result['title'], 'Vimeo Tributes')
         self.assertTrue(len(result['entries']) > 24)

     def test_vimeo_user(self):
@@ -64,7 +73,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoUserIE(dl)
         result = ie.extract('http://vimeo.com/nkistudio/videos')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Nki')
+        self.assertEqual(result['title'], 'Nki')
         self.assertTrue(len(result['entries']) > 65)

     def test_vimeo_album(self):
@@ -72,7 +81,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoAlbumIE(dl)
         result = ie.extract('http://vimeo.com/album/2632481')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Staff Favorites: November 2013')
+        self.assertEqual(result['title'], 'Staff Favorites: November 2013')
         self.assertTrue(len(result['entries']) > 12)

     def test_vimeo_groups(self):
@@ -80,7 +89,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoGroupsIE(dl)
         result = ie.extract('http://vimeo.com/groups/rolexawards')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Rolex Awards for Enterprise')
+        self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
         self.assertTrue(len(result['entries']) > 72)

     def test_ustream_channel(self):
@@ -88,7 +97,7 @@ class TestPlaylists(unittest.TestCase):
         ie = UstreamChannelIE(dl)
         result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'5124905')
+        self.assertEqual(result['id'], '5124905')
         self.assertTrue(len(result['entries']) >= 11)

     def test_soundcloud_set(self):
@@ -96,7 +105,7 @@ class TestPlaylists(unittest.TestCase):
         ie = SoundcloudSetIE(dl)
         result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'The Royal Concept EP')
+        self.assertEqual(result['title'], 'The Royal Concept EP')
         self.assertTrue(len(result['entries']) >= 6)

     def test_soundcloud_user(self):
@@ -104,7 +113,7 @@ class TestPlaylists(unittest.TestCase):
         ie = SoundcloudUserIE(dl)
         result = ie.extract('https://soundcloud.com/the-concept-band')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'9615865')
+        self.assertEqual(result['id'], '9615865')
         self.assertTrue(len(result['entries']) >= 12)

     def test_livestream_event(self):
@@ -112,7 +121,7 @@ class TestPlaylists(unittest.TestCase):
         ie = LivestreamIE(dl)
         result = ie.extract('http://new.livestream.com/tedx/cityenglish')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'TEDCity2.0 (English)')
+        self.assertEqual(result['title'], 'TEDCity2.0 (English)')
         self.assertTrue(len(result['entries']) >= 4)

     def test_nhl_videocenter(self):
@@ -120,8 +129,8 @@ class TestPlaylists(unittest.TestCase):
         ie = NHLVideocenterIE(dl)
         result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'999')
-        self.assertEqual(result['title'], u'Highlights')
+        self.assertEqual(result['id'], '999')
+        self.assertEqual(result['title'], 'Highlights')
         self.assertEqual(len(result['entries']), 12)

     def test_bambuser_channel(self):
@@ -129,7 +138,7 @@ class TestPlaylists(unittest.TestCase):
         ie = BambuserChannelIE(dl)
         result = ie.extract('http://bambuser.com/channel/pixelversity')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'pixelversity')
+        self.assertEqual(result['title'], 'pixelversity')
         self.assertTrue(len(result['entries']) >= 60)

     def test_bandcamp_album(self):
@@ -137,7 +146,7 @@ class TestPlaylists(unittest.TestCase):
         ie = BandcampAlbumIE(dl)
         result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Nightmare Night EP')
+        self.assertEqual(result['title'], 'Nightmare Night EP')
         self.assertTrue(len(result['entries']) >= 4)

     def test_smotri_community(self):
@@ -145,8 +154,8 @@ class TestPlaylists(unittest.TestCase):
         ie = SmotriCommunityIE(dl)
         result = ie.extract('http://smotri.com/community/video/kommuna')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'kommuna')
-        self.assertEqual(result['title'], u'КПРФ')
+        self.assertEqual(result['id'], 'kommuna')
+        self.assertEqual(result['title'], 'КПРФ')
         self.assertTrue(len(result['entries']) >= 4)

     def test_smotri_user(self):
@@ -154,9 +163,92 @@ class TestPlaylists(unittest.TestCase):
         ie = SmotriUserIE(dl)
         result = ie.extract('http://smotri.com/user/inspector')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'inspector')
-        self.assertEqual(result['title'], u'Inspector')
+        self.assertEqual(result['id'], 'inspector')
+        self.assertEqual(result['title'], 'Inspector')
         self.assertTrue(len(result['entries']) >= 9)

+    def test_AcademicEarthCourse(self):
+        dl = FakeYDL()
+        ie = AcademicEarthCourseIE(dl)
+        result = ie.extract('http://academicearth.org/courses/building-dynamic-websites/')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'building-dynamic-websites')
+        self.assertEqual(result['title'], 'Building Dynamic Websites')
+        self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
+        self.assertEqual(len(result['entries']), 10)
+
+    def test_ivi_compilation(self):
+        dl = FakeYDL()
+        ie = IviCompilationIE(dl)
+        result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'dezhurnyi_angel')
+        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)')
+        self.assertTrue(len(result['entries']) >= 36)
+
+    def test_ivi_compilation_season(self):
+        dl = FakeYDL()
+        ie = IviCompilationIE(dl)
+        result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'dezhurnyi_angel/season2')
+        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон')
+        self.assertTrue(len(result['entries']) >= 20)
+
+    def test_imdb_list(self):
+        dl = FakeYDL()
+        ie = ImdbListIE(dl)
+        result = ie.extract('http://www.imdb.com/list/JFs9NWw6XI0')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'JFs9NWw6XI0')
+        self.assertEqual(result['title'], 'March 23, 2012 Releases')
+        self.assertEqual(len(result['entries']), 7)
+
+    def test_khanacademy_topic(self):
+        dl = FakeYDL()
+        ie = KhanAcademyIE(dl)
+        result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'cryptography')
+        self.assertEqual(result['title'], 'Journey into cryptography')
+        self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
+        self.assertTrue(len(result['entries']) >= 3)
+
+    def test_EveryonesMixtape(self):
+        dl = FakeYDL()
+        ie = EveryonesMixtapeIE(dl)
+        result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'm7m0jJAbMQi')
+        self.assertEqual(result['title'], 'Driving')
+        self.assertEqual(len(result['entries']), 24)
+
+    def test_rutube_channel(self):
+        dl = FakeYDL()
+        ie = RutubeChannelIE(dl)
+        result = ie.extract('http://rutube.ru/tags/video/1409')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], '1409')
+        self.assertTrue(len(result['entries']) >= 34)
+
+    def test_multiple_brightcove_videos(self):
+        # https://github.com/rg3/youtube-dl/issues/2283
+        dl = FakeYDL()
+        ie = GenericIE(dl)
+        result = ie.extract('http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'always-never-nuclear-command-and-control')
+        self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
+        self.assertEqual(len(result['entries']), 3)
+
+    def test_GoogleSearch(self):
+        dl = FakeYDL()
+        ie = GoogleSearchIE(dl)
+        result = ie.extract('gvsearch15:python language')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'python language')
+        self.assertEqual(result['title'], 'python language')
+        self.assertTrue(len(result['entries']) == 15)
+
 if __name__ == '__main__':
     unittest.main()
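Every new playlist test asserts the same contract: extract() returns an info dict whose _type is 'playlist', carrying an id and/or title plus an entries list. A hedged illustration of the shape these assertions rely on (all values invented):

playlist_result = {
    '_type': 'playlist',        # what assertIsPlaylist() checks
    'id': 'example-playlist',   # illustrative values only
    'title': 'Example Playlist',
    'entries': [
        {'_type': 'url', 'url': 'http://example.com/video1'},
        {'_type': 'url', 'url': 'http://example.com/video2'},
    ],
}
assert playlist_result['_type'] == 'playlist'
assert len(playlist_result['entries']) >= 2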
test/test_subtitles.py
@@ -10,9 +10,11 @@ from test.helper import FakeYDL, md5


 from youtube_dl.extractor import (
+    BlipTVIE,
     YoutubeIE,
     DailymotionIE,
     TEDIE,
+    VimeoIE,
 )


@@ -36,10 +38,6 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE

-    def getSubtitles(self):
-        info_dict = self.getInfoDict()
-        return info_dict[0]['subtitles']
-
     def test_youtube_no_writesubtitles(self):
         self.DL.params['writesubtitles'] = False
         subtitles = self.getSubtitles()
@@ -171,13 +169,13 @@ class TestTedSubtitles(BaseTestSubtitles):
     def test_subtitles(self):
         self.DL.params['writesubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d')
+        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')

     def test_subtitles_lang(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['subtitleslangs'] = ['fr']
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6')
+        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')

     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
@@ -206,5 +204,80 @@ class TestTedSubtitles(BaseTestSubtitles):
         for lang in langs:
             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)


+class TestBlipTVSubtitles(BaseTestSubtitles):
+    url = 'http://blip.tv/a/a-6603250'
+    IE = BlipTVIE
+
+    def test_list_subtitles(self):
+        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.params['listsubtitles'] = True
+        info_dict = self.getInfoDict()
+        self.assertEqual(info_dict, None)
+
+    def test_allsubtitles(self):
+        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '5b75c300af65fe4476dff79478bb93e4')
+
+
+class TestVimeoSubtitles(BaseTestSubtitles):
+    url = 'http://vimeo.com/76979871'
+    IE = VimeoIE
+
+    def test_no_writesubtitles(self):
+        subtitles = self.getSubtitles()
+        self.assertEqual(subtitles, None)
+
+    def test_subtitles(self):
+        self.DL.params['writesubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
+
+    def test_subtitles_lang(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['subtitleslangs'] = ['fr']
+        subtitles = self.getSubtitles()
+        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
+
+    def test_list_subtitles(self):
+        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.params['listsubtitles'] = True
+        info_dict = self.getInfoDict()
+        self.assertEqual(info_dict, None)
+
+    def test_automatic_captions(self):
+        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.params['writeautomaticsub'] = True
+        self.DL.params['subtitleslang'] = ['en']
+        subtitles = self.getSubtitles()
+        self.assertTrue(len(subtitles.keys()) == 0)
+
+    def test_nosubtitles(self):
+        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.url = 'http://vimeo.com/56015672'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(len(subtitles), 0)
+
+    def test_multiple_langs(self):
+        self.DL.params['writesubtitles'] = True
+        langs = ['es', 'fr', 'de']
+        self.DL.params['subtitleslangs'] = langs
+        subtitles = self.getSubtitles()
+        for lang in langs:
+            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+
+
 if __name__ == '__main__':
     unittest.main()
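The new BlipTV and Vimeo suites drive subtitle extraction purely through DL.params. A sketch of how the same switches look from the public API (the URL is one of the test fixtures; the exact option set is an assumption based on the params the tests set):

from youtube_dl import YoutubeDL

# Request French and German subtitles without downloading the video itself
# (writesubtitles/subtitleslangs as exercised by the tests above).
ydl_opts = {
    'writesubtitles': True,
    'subtitleslangs': ['fr', 'de'],
    'skip_download': True,
}
ydl = YoutubeDL(ydl_opts)
info = ydl.extract_info('http://vimeo.com/76979871', download=False)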
test/test_unicode_literals.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+from __future__ import unicode_literals
+
+import io
+import os
+import re
+import unittest
+
+rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+IGNORED_FILES = [
+    'setup.py',  # http://bugs.python.org/issue13943
+]
+
+
+class TestUnicodeLiterals(unittest.TestCase):
+    def test_all_files(self):
+        print('Skipping this test (not yet fully implemented)')
+        return
+
+        for dirpath, _, filenames in os.walk(rootDir):
+            for basename in filenames:
+                if not basename.endswith('.py'):
+                    continue
+                if basename in IGNORED_FILES:
+                    continue
+
+                fn = os.path.join(dirpath, basename)
+                with io.open(fn, encoding='utf-8') as inf:
+                    code = inf.read()
+
+                if "'" not in code and '"' not in code:
+                    continue
+                imps = 'from __future__ import unicode_literals'
+                self.assertTrue(
+                    imps in code,
+                    ' %s missing in %s' % (imps, fn))
+
+                m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
+                if m is not None:
+                    self.assertTrue(
+                        m is None,
+                        'u present in %s, around %s' % (
+                            fn, code[m.start() - 10:m.end() + 10]))
+
+
+if __name__ == '__main__':
+    unittest.main()
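The interesting part of the new test is the lookbehind regex, which flags a u'...' prefix only when it is preceded by whitespace and not immediately followed by ), a comma, or end of line. Its behavior on a few sample snippets, assuming the same pattern as above:

import re

# Same pattern as the test: a u-prefixed string literal preceded by
# whitespace, unless the quote is immediately followed by ')', ',' or EOL.
U_PREFIX = re.compile(r'(?<=\s)u[\'"](?!\)|,|$)')

assert U_PREFIX.search("x = u'hello'") is not None  # flagged
assert U_PREFIX.search("print(u'x')") is None       # 'u' not preceded by whitespace
assert U_PREFIX.search('f(u)') is None              # no string literal at all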
test/test_utils.py
@@ -13,20 +13,25 @@ import xml.etree.ElementTree

 #from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import (
-    timeconvert,
-    sanitize_filename,
-    unescapeHTML,
-    orderedSet,
     DateRange,
-    unified_strdate,
-    find_xpath_attr,
-    get_meta_content,
-    xpath_with_ns,
-    smuggle_url,
-    unsmuggle_url,
-    shell_quote,
     encodeFilename,
+    find_xpath_attr,
+    fix_xml_ampersands,
+    get_meta_content,
+    orderedSet,
+    PagedList,
+    parse_duration,
+    sanitize_filename,
+    shell_quote,
+    smuggle_url,
     str_to_int,
+    struct_unpack,
+    timeconvert,
+    unescapeHTML,
+    unified_strdate,
+    unsmuggle_url,
+    url_basename,
+    xpath_with_ns,
 )

 if sys.version_info < (3, 0):
@@ -123,6 +128,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('8/7/2009'), '20090708')
         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
+        self.assertEqual(unified_strdate('1968-12-10'), '19681210')

     def test_find_xpath_attr(self):
         testxml = u'''<root>
@@ -181,6 +187,68 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(str_to_int('123,456'), 123456)
         self.assertEqual(str_to_int('123.456'), 123456)

+    def test_url_basename(self):
+        self.assertEqual(url_basename(u'http://foo.de/'), u'')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz'), u'baz')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz?x=y'), u'baz')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz#x=y'), u'baz')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz/'), u'baz')
+        self.assertEqual(
+            url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
+            u'trailer.mp4')
+
+    def test_parse_duration(self):
+        self.assertEqual(parse_duration(None), None)
+        self.assertEqual(parse_duration('1'), 1)
+        self.assertEqual(parse_duration('1337:12'), 80232)
+        self.assertEqual(parse_duration('9:12:43'), 33163)
+        self.assertEqual(parse_duration('12:00'), 720)
+        self.assertEqual(parse_duration('00:01:01'), 61)
+        self.assertEqual(parse_duration('x:y'), None)
+        self.assertEqual(parse_duration('3h11m53s'), 11513)
+        self.assertEqual(parse_duration('62m45s'), 3765)
+        self.assertEqual(parse_duration('6m59s'), 419)
+        self.assertEqual(parse_duration('49s'), 49)
+        self.assertEqual(parse_duration('0h0m0s'), 0)
+        self.assertEqual(parse_duration('0m0s'), 0)
+        self.assertEqual(parse_duration('0s'), 0)
+
+    def test_fix_xml_ampersands(self):
+        self.assertEqual(
+            fix_xml_ampersands('"&x=y&z=a'), '"&amp;x=y&amp;z=a')
+        self.assertEqual(
+            fix_xml_ampersands('"&amp;x=y&wrong;&z=a'),
+            '"&amp;x=y&amp;wrong;&amp;z=a')
+        self.assertEqual(
+            fix_xml_ampersands('&amp;&apos;&gt;&lt;&quot;'),
+            '&amp;&apos;&gt;&lt;&quot;')
+        self.assertEqual(
+            fix_xml_ampersands('&#1234;&#x1abC;'), '&#1234;&#x1abC;')
+        self.assertEqual(fix_xml_ampersands('&#&#'), '&amp;#&amp;#')
+
+    def test_paged_list(self):
+        def testPL(size, pagesize, sliceargs, expected):
+            def get_page(pagenum):
+                firstid = pagenum * pagesize
+                upto = min(size, pagenum * pagesize + pagesize)
+                for i in range(firstid, upto):
+                    yield i
+
+            pl = PagedList(get_page, pagesize)
+            got = pl.getslice(*sliceargs)
+            self.assertEqual(got, expected)
+
+        testPL(5, 2, (), [0, 1, 2, 3, 4])
+        testPL(5, 2, (1,), [1, 2, 3, 4])
+        testPL(5, 2, (2,), [2, 3, 4])
+        testPL(5, 2, (4,), [4])
+        testPL(5, 2, (0, 3), [0, 1, 2])
+        testPL(5, 2, (1, 4), [1, 2, 3])
+        testPL(5, 2, (2, 99), [2, 3, 4])
+        testPL(5, 2, (20, 99), [])
+
+    def test_struct_unpack(self):
+        self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))
+
 if __name__ == '__main__':
     unittest.main()
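PagedList is the most substantial of the new utilities: it exposes a page-oriented source as a lazily sliced sequence, so only the pages a slice actually touches need to be fetched. A minimal sketch that satisfies the getslice() calls in test_paged_list above (not the real youtube_dl.utils class, which may differ internally):

import itertools

class PagedList(object):
    """Expose page-oriented data as a sliceable sequence (sketch)."""
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        firstpage = start // self._pagesize
        for pagenum in itertools.count(firstpage):
            page = list(self._pagefunc(pagenum))
            if not page:
                break  # source exhausted
            base = pagenum * self._pagesize
            for i, entry in enumerate(page):
                idx = base + i
                if idx < start:
                    continue
                if end is not None and idx >= end:
                    return res
                res.append(entry)
        return res

Fetching starts at start // pagesize rather than page zero, which is the point of the class: a slice near the end of a large playlist never touches the earlier pages.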
test/test_youtube_lists.py
@@ -30,7 +30,7 @@ class TestYoutubeLists(unittest.TestCase):
         result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'ytdl test PL')
-        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
+        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
         self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])

     def test_youtube_playlist_noplaylist(self):
@@ -39,7 +39,7 @@ class TestYoutubeLists(unittest.TestCase):
         ie = YoutubePlaylistIE(dl)
         result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
         self.assertEqual(result['_type'], 'url')
-        self.assertEqual(YoutubeIE()._extract_id(result['url']), 'FXxLjLQi3Fg')
+        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

     def test_issue_673(self):
         dl = FakeYDL()
@@ -59,7 +59,7 @@ class TestYoutubeLists(unittest.TestCase):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
         result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
+        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
         self.assertFalse('pElCt5oNDuI' in ytie_results)
         self.assertFalse('KdPEApIVdWM' in ytie_results)

@@ -76,9 +76,9 @@ class TestYoutubeLists(unittest.TestCase):
         # TODO find a > 100 (paginating?) videos course
         result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         entries = result['entries']
-        self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
+        self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
         self.assertEqual(len(entries), 25)
-        self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')
+        self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')

     def test_youtube_channel(self):
         dl = FakeYDL()
@@ -117,10 +117,17 @@ class TestYoutubeLists(unittest.TestCase):
         original_video = entries[0]
         self.assertEqual(original_video['id'], 'rjFaenf1T-Y')

+    def test_youtube_toptracks(self):
+        dl = FakeYDL()
+        ie = YoutubePlaylistIE(dl)
+        result = ie.extract('https://www.youtube.com/playlist?list=MCUS')
+        entries = result['entries']
+        self.assertEqual(len(entries), 100)
+
     def test_youtube_toplist(self):
         dl = FakeYDL()
         ie = YoutubeTopListIE(dl)
-        result = ie.extract('yttoplist:music:Top Tracks')
+        result = ie.extract('yttoplist:music:Trending')
         entries = result['entries']
         self.assertTrue(len(entries) >= 5)
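These hunks track an API rename: the private _extract_id becomes the public classmethod extract_id, so tests no longer poke a private helper. A reduced sketch of what such a classmethod does; the regex is deliberately simplified and is not the real _VALID_URL:

import re

class YoutubeIE(object):  # reduced stand-in for the real extractor
    @classmethod
    def extract_id(cls, url):
        # Pull the 11-character video id out of a watch URL (simplified).
        m = re.search(r'(?:v=|youtu\.be/)([0-9A-Za-z_-]{11})', url)
        if m is None:
            raise ValueError('Invalid URL: %s' % url)
        return m.group(1)

assert YoutubeIE.extract_id('https://www.youtube.com/watch?v=BaW_jenozKc') == 'BaW_jenozKc'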
test/test_youtube_signature.py
@@ -28,10 +28,10 @@ _TESTS = [
         u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
     ),
     (
-        u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf',
-        u'swf',
-        82,
-        u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321'
+        u'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
+        u'js',
+        90,
+        u']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
     ),
 ]
youtube-dl.plugin.zsh (new file, 24 lines)
@@ -0,0 +1,24 @@
+# This allows the youtube-dl command to be installed in ZSH using antigen.
+# Antigen is a bundle manager. It allows you to enhance the functionality of
+# your zsh session by installing bundles and themes easily.
+
+# Antigen documentation:
+# http://antigen.sharats.me/
+# https://github.com/zsh-users/antigen
+
+# Install youtube-dl:
+# antigen bundle rg3/youtube-dl
+# Bundles installed by antigen are available for use immediately.
+
+# Update youtube-dl (and all other antigen bundles):
+# antigen update
+
+# The antigen command will download the git repository to a folder and then
+# execute an enabling script (this file). The complete process for loading the
+# code is documented here:
+# https://github.com/zsh-users/antigen#notes-on-writing-plugins
+
+# This specific script just aliases youtube-dl to the python script that this
+# library provides. This requires updating the PYTHONPATH to ensure that the
+# full set of code can be located.
+alias youtube-dl="PYTHONPATH=$(dirname $0) $(dirname $0)/bin/youtube-dl"
@@ -1,724 +1,12 @@
|
|||||||
import os
|
# Legacy file for backwards compatibility, use youtube_dl.downloader instead!
|
||||||
import re
|
from .downloader import FileDownloader as RealFileDownloader
|
||||||
import subprocess
|
from .downloader import get_suitable_downloader
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
|
|
||||||
from .utils import (
|
|
||||||
compat_urllib_error,
|
|
||||||
compat_urllib_request,
|
|
||||||
ContentTooShortError,
|
|
||||||
determine_ext,
|
|
||||||
encodeFilename,
|
|
||||||
format_bytes,
|
|
||||||
sanitize_open,
|
|
||||||
timeconvert,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class FileDownloader(object):
|
|
||||||
"""File Downloader class.
|
|
||||||
|
|
||||||
File downloader objects are the ones responsible of downloading the
|
|
||||||
actual video file and writing it to disk.
|
|
||||||
|
|
||||||
File downloaders accept a lot of parameters. In order not to saturate
|
|
||||||
the object constructor with arguments, it receives a dictionary of
|
|
||||||
options instead.
|
|
||||||
|
|
||||||
Available options:
|
|
||||||
|
|
||||||
verbose: Print additional info to stdout.
|
|
||||||
quiet: Do not print messages to stdout.
|
|
||||||
ratelimit: Download speed limit, in bytes/sec.
|
|
||||||
retries: Number of times to retry for HTTP error 5xx
|
|
||||||
buffersize: Size of download buffer in bytes.
|
|
||||||
noresizebuffer: Do not automatically resize the download buffer.
|
|
||||||
continuedl: Try to continue downloads if possible.
|
|
||||||
noprogress: Do not print the progress bar.
|
|
||||||
logtostderr: Log messages to stderr instead of stdout.
|
|
||||||
consoletitle: Display progress in console window's titlebar.
|
|
||||||
nopart: Do not use temporary .part files.
|
|
||||||
updatetime: Use the Last-modified header to set output file timestamps.
|
|
||||||
test: Download only first bytes to test the downloader.
|
|
||||||
min_filesize: Skip files smaller than this size
|
|
||||||
max_filesize: Skip files larger than this size
|
|
||||||
"""
|
|
||||||
|
|
||||||
params = None
|
|
||||||
|
|
||||||
def __init__(self, ydl, params):
|
|
||||||
"""Create a FileDownloader object with the given options."""
|
|
||||||
self.ydl = ydl
|
|
||||||
self._progress_hooks = []
|
|
||||||
self.params = params
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def format_seconds(seconds):
|
|
||||||
(mins, secs) = divmod(seconds, 60)
|
|
||||||
(hours, mins) = divmod(mins, 60)
|
|
||||||
if hours > 99:
|
|
||||||
return '--:--:--'
|
|
||||||
if hours == 0:
|
|
||||||
return '%02d:%02d' % (mins, secs)
|
|
||||||
else:
|
|
||||||
return '%02d:%02d:%02d' % (hours, mins, secs)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def calc_percent(byte_counter, data_len):
|
|
||||||
if data_len is None:
|
|
||||||
return None
|
|
||||||
return float(byte_counter) / float(data_len) * 100.0
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def format_percent(percent):
|
|
||||||
if percent is None:
|
|
||||||
return '---.-%'
|
|
||||||
return '%6s' % ('%3.1f%%' % percent)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def calc_eta(start, now, total, current):
|
|
||||||
if total is None:
|
|
||||||
return None
|
|
||||||
dif = now - start
|
|
||||||
if current == 0 or dif < 0.001: # One millisecond
|
|
||||||
return None
|
|
||||||
rate = float(current) / dif
|
|
||||||
return int((float(total) - float(current)) / rate)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def format_eta(eta):
|
|
||||||
if eta is None:
|
|
||||||
return '--:--'
|
|
||||||
return FileDownloader.format_seconds(eta)
|
|
||||||
|
|
||||||
    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

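best_block_size() steers the next read toward the throughput measured on the previous one, clamped between half and double the previous block size (with an absolute 4 MB ceiling), so the buffer at most doubles or halves per iteration. Illustrative values:

assert FileDownloader.best_block_size(0.1, 1024) == 2048  # ~10 KiB/s, capped at 2 * 1024
assert FileDownloader.best_block_size(2.0, 1024) == 512   # 512 B/s, floored at 1024 / 2
assert FileDownloader.best_block_size(1.0, 1500) == 1500  # within [750, 3000]: keep the rate
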
    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

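The suffix lookup works because str.index('') is 0, so a bare number falls through to a multiplier of 1024 ** 0. A few illustrative values:

assert FileDownloader.parse_bytes('500') == 500        # no suffix: 1024 ** 0
assert FileDownloader.parse_bytes('50k') == 51200      # 50 * 1024
assert FileDownloader.parse_bytes('4.2M') == 4404019   # round(4.2 * 1024 ** 2)
assert FileDownloader.parse_bytes('broken') is None    # unparseable input
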
    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        self.ydl.to_stderr(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * elapsed) / rate_limit)

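The sleep term falls out of requiring that total elapsed time be at least byte_counter / rate_limit seconds. A worked example with assumed numbers:

byte_counter = 3 * 1024 * 1024   # 3 MiB downloaded so far
rate_limit = 1024 * 1024         # 1 MiB/s allowed
elapsed = 2.0                    # seconds since the download started

# 3 MiB at 1 MiB/s should have taken 3 s; sleep for the missing second:
sleep_time = (byte_counter - rate_limit * elapsed) / rate_limit
assert sleep_time == 1.0
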
    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError):
            self.report_error(u'unable to rename file')

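The .part round trip in one place (a sketch; `fd` is assumed to be a FileDownloader with default params):

assert fd.temp_name('video.mp4') == 'video.mp4.part'
assert fd.undo_temp_name('video.mp4.part') == 'video.mp4'
assert fd.temp_name('-') == '-'    # stdout and the nopart option both bypass .part
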
    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        filetime = timeconvert(last_modified_hdr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        fullmsg = u'[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += u' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = u'\r'
            else:
                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title(u'youtube-dl ' + msg)

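On POSIX terminals the progress line is redrawn in place: '\r' rewinds the cursor to column 0 and the ANSI sequence '\x1b[K' erases to end of line, so a shorter message cannot leave stale characters behind. Pre-ANSI Windows consoles only get '\r', which is why the method pads with spaces to the previous line's length instead. A standalone sketch of the same trick:

import sys
import time

for pct in range(0, 101, 20):
    # '\r' rewinds, '\x1b[K' clears the remainder of the line (ANSI terminals)
    sys.stdout.write('\r\x1b[K[download] %3d%% of 10.00MiB' % pct)
    sys.stdout.flush()
    time.sleep(0.1)
sys.stdout.write('\n')
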
    def report_progress(self, percent, data_len_str, speed, eta):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        if eta is not None:
            eta_str = self.format_eta(eta)
        else:
            eta_str = 'Unknown ETA'
        if percent is not None:
            percent_str = self.format_percent(percent)
        else:
            percent_str = 'Unknown %'
        speed_str = self.format_speed(speed)

        msg = (u'%s of %s at %s ETA %s' %
               (percent_str, data_len_str, speed_str, eta_str))
        self._report_progress_status(msg)

    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
        if self.params.get('noprogress', False):
            return
        downloaded_str = format_bytes(downloaded_data_len)
        speed_str = self.format_speed(speed)
        elapsed_str = FileDownloader.format_seconds(elapsed)
        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
        self._report_progress_status(msg)

    def report_finish(self, data_len_str, tot_time):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self._report_progress_status(
                (u'100%% of %s in %s' %
                 (data_len_str, self.format_seconds(tot_time))),
                is_last_line=True)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path, tc_url, live, conn):
        def run_rtmpdump(args):
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr
                line = u''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    data_len_str = u'~' + format_bytes(data_len)
                    self.report_progress(percent, data_len_str, speed, eta)
                    cursor_in_new_line = False
                    self._hook_progress({
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'status': 'downloading',
                        'eta': eta,
                        'speed': speed,
                    })
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'speed': speed,
                        })
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen(u'')
                        cursor_in_new_line = True
                        self.to_screen(u'[rtmpdump] ' + line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen(u'')
            return proc.returncode

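rtmpdump has no machine-readable progress channel, so run_rtmpdump() scrapes stderr one character at a time (its status lines end in '\r' and would never be returned by readline()) and matches the human-readable status format. An illustrative parse, with a made-up line in that format:

import re

line = '3549.665 kB / 35.52 sec (97.2%)'   # made-up rtmpdump status line
mobj = re.search(
    r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)',
    line)
assert mobj is not None
downloaded_bytes = int(float(mobj.group(1)) * 1024)   # 3634856
percent = float(mobj.group(2))                        # 97.2
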
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', tc_url]
        if test:
            basic_args += ['--stop', '1']
        if live:
            basic_args += ['--live']
        if conn:
            basic_args += ['--conn', conn]
        args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
            # on Python 2.x
            # See http://stackoverflow.com/a/9951851/35070
            subprocess_encoding = sys.getfilesystemencoding()
            args = [a.encode(subprocess_encoding, 'ignore') for a in args]
        else:
            subprocess_encoding = None

        if self.params.get('verbose', False):
            if subprocess_encoding:
                str_args = [
                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                    for a in args]
            else:
                str_args = args
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
            except ImportError:
                shell_quote = repr
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args))

        retval = run_rtmpdump(args)

        while (retval == 2 or retval == 1) and not test:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == 2 and cursize > 1024:
                self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = 0
                break
        if retval == 0 or (test and retval == 2):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'rtmpdump exited with code %d' % retval)
            return False

    def _download_with_mplayer(self, filename, url):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
        # Check for mplayer first
        try:
            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
            return False

        # Download using mplayer.
        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'mplayer exited with code %d' % retval)
            return False

    def _download_m3u8_with_ffmpeg(self, filename, url):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
                '-bsf:a', 'aac_adtstoasc', tmpfilename]

        for program in ['avconv', 'ffmpeg']:
            try:
                subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
                break
            except (OSError, IOError):
                pass
        else:
            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
            return False
        cmd = [program] + args

        retval = subprocess.call(cmd)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (program, fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'ffmpeg exited with code %d' % retval)
            return False

    def _do_download(self, filename, info_dict):
        url = info_dict['url']

        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        # Attempt to download using rtmpdump
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url,
                                                info_dict.get('player_url', None),
                                                info_dict.get('page_url', None),
                                                info_dict.get('play_path', None),
                                                info_dict.get('tc_url', None),
                                                info_dict.get('rtmp_live', False),
                                                info_dict.get('rtmp_conn', None))

        # Attempt to download using mplayer
        if url.startswith('mms') or url.startswith('rtsp'):
            return self._download_with_mplayer(filename, url)

        # m3u8 manifests are downloaded with ffmpeg
        if determine_ext(url) == u'm3u8':
            return self._download_m3u8_with_ffmpeg(filename, url)

        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        if self.params.get('test', False):
            request.add_header('Range', 'bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

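Resume is plain HTTP: ask the server for everything from the current .part size onwards and append to the file. A standalone sketch of the same request (the URL is a placeholder):

import os

try:
    import urllib.request as compat_urllib_request  # Python 3
except ImportError:
    import urllib2 as compat_urllib_request        # Python 2

url = 'http://example.com/video.mp4'               # placeholder
tmpfilename = 'video.mp4.part'
resume_len = os.path.getsize(tmpfilename) if os.path.isfile(tmpfilename) else 0

request = compat_urllib_request.Request(url)
if resume_len:
    request.add_header('Range', 'bytes=%d-' % resume_len)
# A 206 reply means the server honoured the range; 416 means the range is
# unsatisfiable, which the retry loop below treats as either 'already
# finished' or 'restart from scratch'.
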
        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                if count == 0 and 'urlhandle' in info_dict:
                    # Note: this cached handle is immediately superseded by
                    # the fresh urlopen() below.
                    data = info_dict['urlhandle']
                data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len = self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                eta = percent = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        stream.close()
        self.report_finish(data_len_str, (time.time() - start))
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        """ ph gets called on download progress, with a dictionary with the entries
        * filename: The final filename
        * status: One of "downloading" and "finished"

        It can also have some of the following entries:

        * downloaded_bytes: Bytes on disk
        * total_bytes: Total bytes, None if unknown
        * tmpfilename: The filename we're currently writing to
        * eta: The estimated time in seconds, None if unknown
        * speed: The download speed in bytes/second, None if unknown

        Hooks are guaranteed to be called at least once (with status "finished")
        if the download is successful.
        """
        self._progress_hooks.append(ph)


The compare then replaces this class with a thin shim that delegates to the new per-protocol downloaders:

# This class reproduces the old behaviour of FileDownloader
class FileDownloader(RealFileDownloader):
    def _do_download(self, filename, info_dict):
        real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params)
        for ph in self._progress_hooks:
            real_fd.add_progress_hook(ph)
        return real_fd.download(filename, info_dict)
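A minimal progress hook, to show the shape of the status dictionaries delivered through add_progress_hook() above (sketch; `fd` is an assumed FileDownloader):

def my_hook(status):
    if status['status'] == 'downloading':
        total = status.get('total_bytes')      # None for live streams
        done = status.get('downloaded_bytes', 0)
        if total:
            print('%.1f%% of %d bytes' % (100.0 * done / total, total))
    elif status['status'] == 'finished':
        print('Done: %s' % status['filename'])

fd.add_progress_hook(my_hook)
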
The remaining hunks are against YoutubeDL.py:

@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-

-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals

 import collections
 import errno
@@ -39,6 +39,7 @@ from .utils import (
     locked_file,
     make_HTTPS_handler,
     MaxDownloadsReached,
+    PagedList,
     PostProcessingError,
     platform_name,
     preferredencoding,
@@ -47,12 +48,15 @@ from .utils import (
     subtitles_filename,
     takewhile_inclusive,
     UnavailableVideoError,
+    url_basename,
     write_json_file,
     write_string,
     YoutubeDLHandler,
+    prepend_extension,
 )
 from .extractor import get_info_extractor, gen_extractors
-from .FileDownloader import FileDownloader
+from .downloader import get_suitable_downloader
+from .postprocessor import FFmpegMergerPP
 from .version import __version__

@@ -147,11 +151,19 @@ class YoutubeDL(object):
     socket_timeout:    Time to wait for unresponsive hosts, in seconds
     bidi_workaround:   Work around buggy terminals without bidirectional text
                        support, using fridibi
+    debug_printtraffic:Print out sent and received HTTP traffic
+    include_ads:       Download ads as well
+    default_search:    Prepend this string if an input url is not valid.
+                       'auto' for elaborate guessing

     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
     noresizebuffer, retries, continuedl, noprogress, consoletitle

+    The following options are used by the post processors:
+    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
+                       otherwise prefer avconv.
     """

     params = None
@@ -163,6 +175,8 @@ class YoutubeDL(object):

     def __init__(self, params=None):
         """Create a FileDownloader object with the given options."""
+        if params is None:
+            params = {}
         self._ies = []
         self._ies_instances = {}
         self._pps = []
@@ -171,7 +185,7 @@ class YoutubeDL(object):
         self._num_downloads = 0
         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
         self._err_file = sys.stderr
-        self.params = {} if params is None else params
+        self.params = params

         if params.get('bidi_workaround', False):
             try:
@@ -182,15 +196,21 @@ class YoutubeDL(object):
                     width_args = []
                 else:
                     width_args = ['-w', str(width)]
-                self._fribidi = subprocess.Popen(
-                    ['fribidi', '-c', 'UTF-8'] + width_args,
+                sp_kwargs = dict(
                     stdin=subprocess.PIPE,
                     stdout=slave,
                     stderr=self._err_file)
-                self._fribidi_channel = os.fdopen(master, 'rb')
+                try:
+                    self._output_process = subprocess.Popen(
+                        ['bidiv'] + width_args, **sp_kwargs
+                    )
+                except OSError:
+                    self._output_process = subprocess.Popen(
+                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
+                self._output_channel = os.fdopen(master, 'rb')
             except OSError as ose:
                 if ose.errno == 2:
-                    self.report_warning(u'Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
+                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                 else:
                     raise

@@ -199,15 +219,13 @@ class YoutubeDL(object):
                 and not params['restrictfilenames']):
             # On Python 3, the Unicode filesystem API will throw errors (#1474)
             self.report_warning(
-                u'Assuming --restrict-filenames since file system encoding '
-                u'cannot encode all charactes. '
-                u'Set the LC_ALL environment variable to fix this.')
+                'Assuming --restrict-filenames since file system encoding '
+                'cannot encode all charactes. '
+                'Set the LC_ALL environment variable to fix this.')
             self.params['restrictfilenames'] = True

-        self.fd = FileDownloader(self, self.params)
-
         if '%(stitle)s' in self.params.get('outtmpl', ''):
-            self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
+            self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')

         self._setup_opener()

@@ -241,17 +259,22 @@ class YoutubeDL(object):
         self._pps.append(pp)
         pp.set_downloader(self)

+    def add_progress_hook(self, ph):
+        """Add the progress hook (currently only for the file downloader)"""
+        self._progress_hooks.append(ph)
+
     def _bidi_workaround(self, message):
-        if not hasattr(self, '_fribidi_channel'):
+        if not hasattr(self, '_output_channel'):
             return message

-        assert type(message) == type(u'')
-        line_count = message.count(u'\n') + 1
-        self._fribidi.stdin.write((message + u'\n').encode('utf-8'))
-        self._fribidi.stdin.flush()
-        res = u''.join(self._fribidi_channel.readline().decode('utf-8')
+        assert hasattr(self, '_output_process')
+        assert type(message) == type('')
+        line_count = message.count('\n') + 1
+        self._output_process.stdin.write((message + '\n').encode('utf-8'))
+        self._output_process.stdin.flush()
+        res = ''.join(self._output_channel.readline().decode('utf-8')
                       for _ in range(line_count))
-        return res[:-len(u'\n')]
+        return res[:-len('\n')]

     def to_screen(self, message, skip_eol=False):
         """Print message to stdout if not in quiet mode."""
@@ -263,19 +286,19 @@ class YoutubeDL(object):
             self.params['logger'].debug(message)
         elif not check_quiet or not self.params.get('quiet', False):
             message = self._bidi_workaround(message)
-            terminator = [u'\n', u''][skip_eol]
+            terminator = ['\n', ''][skip_eol]
             output = message + terminator

             write_string(output, self._screen_file)

     def to_stderr(self, message):
         """Print message to stderr."""
-        assert type(message) == type(u'')
+        assert type(message) == type('')
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
             message = self._bidi_workaround(message)
-            output = message + u'\n'
+            output = message + '\n'
             write_string(output, self._err_file)

     def to_console_title(self, message):
@@ -286,21 +309,21 @@ class YoutubeDL(object):
             # already of type unicode()
             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
         elif 'TERM' in os.environ:
-            write_string(u'\033]0;%s\007' % message, self._screen_file)
+            write_string('\033]0;%s\007' % message, self._screen_file)

     def save_console_title(self):
         if not self.params.get('consoletitle', False):
             return
         if 'TERM' in os.environ:
             # Save the title on stack
-            write_string(u'\033[22;0t', self._screen_file)
+            write_string('\033[22;0t', self._screen_file)

     def restore_console_title(self):
         if not self.params.get('consoletitle', False):
             return
         if 'TERM' in os.environ:
             # Restore the title from stack
-            write_string(u'\033[23;0t', self._screen_file)
+            write_string('\033[23;0t', self._screen_file)

     def __enter__(self):
         self.save_console_title()
@@ -326,13 +349,13 @@ class YoutubeDL(object):
         if self.params.get('verbose'):
             if tb is None:
                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
-                    tb = u''
+                    tb = ''
                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
-                        tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
+                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                     tb += compat_str(traceback.format_exc())
                 else:
                     tb_data = traceback.format_list(traceback.extract_stack())
-                    tb = u''.join(tb_data)
+                    tb = ''.join(tb_data)
             self.to_stderr(tb)
         if not self.params.get('ignoreerrors', False):
             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
@@ -348,10 +371,10 @@ class YoutubeDL(object):
         If stderr is a tty file the 'WARNING:' will be colored
         '''
         if self._err_file.isatty() and os.name != 'nt':
-            _msg_header = u'\033[0;33mWARNING:\033[0m'
+            _msg_header = '\033[0;33mWARNING:\033[0m'
         else:
-            _msg_header = u'WARNING:'
-        warning_message = u'%s %s' % (_msg_header, message)
+            _msg_header = 'WARNING:'
+        warning_message = '%s %s' % (_msg_header, message)
         self.to_stderr(warning_message)

     def report_error(self, message, tb=None):
@@ -360,22 +383,18 @@ class YoutubeDL(object):
         in red if stderr is a tty file.
         '''
         if self._err_file.isatty() and os.name != 'nt':
-            _msg_header = u'\033[0;31mERROR:\033[0m'
+            _msg_header = '\033[0;31mERROR:\033[0m'
         else:
-            _msg_header = u'ERROR:'
-        error_message = u'%s %s' % (_msg_header, message)
+            _msg_header = 'ERROR:'
+        error_message = '%s %s' % (_msg_header, message)
         self.trouble(error_message, tb)

     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')

-    def increment_downloads(self):
-        """Increment the ordinal that assigns a number to each file."""
-        self._num_downloads += 1
-
     def prepare_filename(self, info_dict):
         """Generate the output filename."""
@@ -386,61 +405,61 @@ class YoutubeDL(object):
             autonumber_size = self.params.get('autonumber_size')
             if autonumber_size is None:
                 autonumber_size = 5
-            autonumber_templ = u'%0' + str(autonumber_size) + u'd'
+            autonumber_templ = '%0' + str(autonumber_size) + 'd'
             template_dict['autonumber'] = autonumber_templ % self._num_downloads
             if template_dict.get('playlist_index') is not None:
-                template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
+                template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']

             sanitize = lambda k, v: sanitize_filename(
                 compat_str(v),
                 restricted=self.params.get('restrictfilenames'),
-                is_id=(k == u'id'))
+                is_id=(k == 'id'))
             template_dict = dict((k, sanitize(k, v))
                                  for k, v in template_dict.items()
                                  if v is not None)
-            template_dict = collections.defaultdict(lambda: u'NA', template_dict)
+            template_dict = collections.defaultdict(lambda: 'NA', template_dict)

             tmpl = os.path.expanduser(self.params['outtmpl'])
             filename = tmpl % template_dict
             return filename
         except ValueError as err:
-            self.report_error(u'Error in output template: ' + str(err) + u' (encoding: ' + repr(preferredencoding()) + ')')
+            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None

     def _match_entry(self, info_dict):
         """ Returns None iff the file should be downloaded """

-        video_title = info_dict.get('title', info_dict.get('id', u'video'))
+        video_title = info_dict.get('title', info_dict.get('id', 'video'))
         if 'title' in info_dict:
             # This can happen when we're just evaluating the playlist
             title = info_dict['title']
             matchtitle = self.params.get('matchtitle', False)
             if matchtitle:
                 if not re.search(matchtitle, title, re.IGNORECASE):
-                    return u'"' + title + '" title did not match pattern "' + matchtitle + '"'
+                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
             rejecttitle = self.params.get('rejecttitle', False)
             if rejecttitle:
                 if re.search(rejecttitle, title, re.IGNORECASE):
-                    return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
         date = info_dict.get('upload_date', None)
         if date is not None:
             dateRange = self.params.get('daterange', DateRange())
             if date not in dateRange:
-                return u'%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
         view_count = info_dict.get('view_count', None)
         if view_count is not None:
             min_views = self.params.get('min_views')
             if min_views is not None and view_count < min_views:
-                return u'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
+                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
             max_views = self.params.get('max_views')
             if max_views is not None and view_count > max_views:
-                return u'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
+                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
         age_limit = self.params.get('age_limit')
         if age_limit is not None:
             if age_limit < info_dict.get('age_limit', 0):
-                return u'Skipping "' + title + '" because it is age restricted'
+                return 'Skipping "' + title + '" because it is age restricted'
         if self.in_download_archive(info_dict):
-            return u'%s has already been recorded in archive' % video_title
+            return '%s has already been recorded in archive' % video_title
         return None

     @staticmethod
@@ -467,8 +486,8 @@ class YoutubeDL(object):
                 continue

             if not ie.working():
-                self.report_warning(u'The program functionality for this site has been marked as broken, '
-                                    u'and will probably not work.')
+                self.report_warning('The program functionality for this site has been marked as broken, '
+                                    'and will probably not work.')

             try:
                 ie_result = ie.extract(url)
@@ -484,6 +503,7 @@ class YoutubeDL(object):
                     {
                         'extractor': ie.IE_NAME,
                         'webpage_url': url,
+                        'webpage_url_basename': url_basename(url),
                         'extractor_key': ie.ie_key(),
                     })
                 if process:
@@ -493,6 +513,8 @@ class YoutubeDL(object):
             except ExtractorError as de:  # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
+            except MaxDownloadsReached:
+                raise
             except Exception as e:
                 if self.params.get('ignoreerrors', False):
                     self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
@@ -500,7 +522,7 @@ class YoutubeDL(object):
                 else:
                     raise
         else:
-            self.report_error(u'no suitable InfoExtractor: %s' % url)
+            self.report_error('no suitable InfoExtractor: %s' % url)

     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
@@ -531,7 +553,7 @@ class YoutubeDL(object):
             def make_result(embedded_info):
                 new_result = ie_result.copy()
                 for f in ('_type', 'url', 'ext', 'player_url', 'formats',
-                          'entries', 'urlhandle', 'ie_key', 'duration',
+                          'entries', 'ie_key', 'duration',
                           'subtitles', 'annotations', 'format',
                           'thumbnail', 'thumbnails'):
                     if f in new_result:
@@ -551,37 +573,46 @@ class YoutubeDL(object):
         elif result_type == 'playlist':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
-            self.to_screen(u'[download] Downloading playlist: %s' % playlist)
+            self.to_screen('[download] Downloading playlist: %s' % playlist)

             playlist_results = []

-            n_all_entries = len(ie_result['entries'])
             playliststart = self.params.get('playliststart', 1) - 1
             playlistend = self.params.get('playlistend', None)
             # For backwards compatibility, interpret -1 as whole list
             if playlistend == -1:
                 playlistend = None

-            entries = ie_result['entries'][playliststart:playlistend]
-            n_entries = len(entries)
-
-            self.to_screen(
-                u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
-                (ie_result['extractor'], playlist, n_all_entries, n_entries))
+            if isinstance(ie_result['entries'], list):
+                n_all_entries = len(ie_result['entries'])
+                entries = ie_result['entries'][playliststart:playlistend]
+                n_entries = len(entries)
+                self.to_screen(
+                    "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
+                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
+            else:
+                assert isinstance(ie_result['entries'], PagedList)
+                entries = ie_result['entries'].getslice(
+                    playliststart, playlistend)
+                n_entries = len(entries)
+                self.to_screen(
+                    "[%s] playlist %s: Downloading %d videos" %
+                    (ie_result['extractor'], playlist, n_entries))

             for i, entry in enumerate(entries, 1):
-                self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
+                self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
                     'playlist': playlist,
                     'playlist_index': i + playliststart,
                     'extractor': ie_result['extractor'],
                     'webpage_url': ie_result['webpage_url'],
+                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                     'extractor_key': ie_result['extractor_key'],
                 }

                 reason = self._match_entry(entry)
                 if reason is not None:
-                    self.to_screen(u'[download] ' + reason)
+                    self.to_screen('[download] ' + reason)
                     continue

                 entry_result = self.process_ie_result(entry,
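PagedList lets an extractor expose a large playlist lazily: getslice(start, end) should only materialize the pages that overlap the requested window. A simplified sketch of that contract (the real class lives in youtube_dl.utils):

class PagedListSketch(object):
    def __init__(self, pagefunc, pagesize, pagecount):
        self._pagefunc = pagefunc    # pagenum -> list of entries on that page
        self._pagesize = pagesize
        self._pagecount = pagecount

    def getslice(self, start=0, end=None):
        if end is None:
            end = self._pagecount * self._pagesize
        first_page = start // self._pagesize
        last_page = min((end - 1) // self._pagesize, self._pagecount - 1)
        res = []
        for pagenum in range(first_page, last_page + 1):
            res.extend(self._pagefunc(pagenum))
        offset = first_page * self._pagesize
        return res[start - offset:end - offset]

fetched = []
def fake_page(n):
    fetched.append(n)
    return ['entry-%d-%d' % (n, i) for i in range(10)]

pl = PagedListSketch(fake_page, pagesize=10, pagecount=100)
assert pl.getslice(25, 28) == ['entry-2-5', 'entry-2-6', 'entry-2-7']
assert fetched == [2]    # only the overlapping page was fetched
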
@@ -596,6 +627,7 @@ class YoutubeDL(object):
                 {
                     'extractor': ie_result['extractor'],
                     'webpage_url': ie_result['webpage_url'],
+                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                     'extractor_key': ie_result['extractor_key'],
                 })
             return r
@@ -612,8 +644,20 @@ class YoutubeDL(object):
             return available_formats[-1]
         elif format_spec == 'worst':
             return available_formats[0]
+        elif format_spec == 'bestaudio':
+            audio_formats = [
+                f for f in available_formats
+                if f.get('vcodec') == 'none']
+            if audio_formats:
+                return audio_formats[-1]
+        elif format_spec == 'worstaudio':
+            audio_formats = [
+                f for f in available_formats
+                if f.get('vcodec') == 'none']
+            if audio_formats:
+                return audio_formats[0]
         else:
-            extensions = [u'mp4', u'flv', u'webm', u'3gp']
+            extensions = ['mp4', 'flv', 'webm', '3gp']
             if format_spec in extensions:
                 filter_f = lambda f: f['ext'] == format_spec
             else:
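Both audio selectors lean on the convention that available_formats is ordered worst-to-best: filtering on vcodec == 'none' keeps the audio-only entries, and [-1]/[0] then pick the best/worst of those. With made-up format dicts:

available_formats = [
    {'format_id': '139', 'vcodec': 'none', 'abr': 48},     # audio only
    {'format_id': '18', 'vcodec': 'h264', 'height': 360},
    {'format_id': '140', 'vcodec': 'none', 'abr': 128},    # audio only
    {'format_id': '137', 'vcodec': 'h264', 'height': 1080},
]
audio_formats = [f for f in available_formats if f.get('vcodec') == 'none']
assert audio_formats[-1]['format_id'] == '140'   # 'bestaudio'
assert audio_formats[0]['format_id'] == '139'    # 'worstaudio'
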
@@ -632,7 +676,7 @@ class YoutubeDL(object):
             info_dict['playlist_index'] = None

         # This extractors handle format selection themselves
-        if info_dict['extractor'] in [u'youtube', u'Youku']:
+        if info_dict['extractor'] in ['Youku']:
             if download:
                 self.process_info(info_dict)
             return info_dict
@@ -649,35 +693,34 @@ class YoutubeDL(object):
             if format.get('format_id') is None:
                 format['format_id'] = compat_str(i)
             if format.get('format') is None:
-                format['format'] = u'{id} - {res}{note}'.format(
+                format['format'] = '{id} - {res}{note}'.format(
                     id=format['format_id'],
                     res=self.format_resolution(format),
-                    note=u' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
+                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                 )
             # Automatically determine file extension if missing
             if 'ext' not in format:
                 format['ext'] = determine_ext(format['url'])

-        if self.params.get('listformats', None):
-            self.list_formats(info_dict)
-            return

         format_limit = self.params.get('format_limit', None)
         if format_limit:
             formats = list(takewhile_inclusive(
                 lambda f: f['format_id'] != format_limit, formats
             ))
-        if self.params.get('prefer_free_formats'):
-            def _free_formats_key(f):
-                try:
-                    ext_ord = [u'flv', u'mp4', u'webm'].index(f['ext'])
-                except ValueError:
-                    ext_ord = -1
-                # We only compare the extension if they have the same height and width
-                return (f.get('height'), f.get('width'), ext_ord)
-            formats = sorted(formats, key=_free_formats_key)

-        req_format = self.params.get('format', 'best')
+        # TODO Central sorting goes here

+        if formats[0] is not info_dict:
+            # only set the 'formats' fields if the original info_dict list them
+            # otherwise we end up with a circular reference, the first (and unique)
+            # element in the 'formats' field in info_dict is info_dict itself,
+            # wich can't be exported to json
+            info_dict['formats'] = formats
+        if self.params.get('listformats', None):
+            self.list_formats(info_dict)
+            return

+        req_format = self.params.get('format')
         if req_format is None:
             req_format = 'best'
         formats_to_download = []
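A note on the `formats[0] is not info_dict` guard above: when an extractor returns a single bare video dict, that dict is itself the only candidate format, and storing it inside its own 'formats' key would create exactly the cycle the added comment describes, which JSON serialization (e.g. for --dump-json or --write-info-json) cannot handle. A minimal standalone illustration of the failure mode (not youtube-dl code):

import json

info_dict = {'id': 'x', 'url': 'http://example.com/v.mp4'}  # hypothetical video entry
info_dict['formats'] = [info_dict]  # the circular reference the guard avoids
json.dumps(info_dict)  # raises ValueError: Circular reference detected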
@@ -685,21 +728,35 @@ class YoutubeDL(object):
         if req_format in ('-1', 'all'):
             formats_to_download = formats
         else:
-            # We can accept formats requestd in the format: 34/5/best, we pick
+            # We can accept formats requested in the format: 34/5/best, we pick
             # the first that is available, starting from left
             req_formats = req_format.split('/')
             for rf in req_formats:
+                if re.match(r'.+?\+.+?', rf) is not None:
+                    # Two formats have been requested like '137+139'
+                    format_1, format_2 = rf.split('+')
+                    formats_info = (self.select_format(format_1, formats),
+                        self.select_format(format_2, formats))
+                    if all(formats_info):
+                        selected_format = {
+                            'requested_formats': formats_info,
+                            'format': rf,
+                            'ext': formats_info[0]['ext'],
+                        }
+                    else:
+                        selected_format = None
+                else:
+                    selected_format = self.select_format(rf, formats)
-                selected_format = self.select_format(rf, formats)
                 if selected_format is not None:
                     formats_to_download = [selected_format]
                     break
         if not formats_to_download:
-            raise ExtractorError(u'requested format not available',
+            raise ExtractorError('requested format not available',
                                  expected=True)

         if download:
             if len(formats_to_download) > 1:
-                self.to_screen(u'[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
+                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
             for format in formats_to_download:
                 new_info = dict(info_dict)
                 new_info.update(format)
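The new '+' branch lets a single -f entry name two formats, typically a video-only and an audio-only stream, which are downloaded separately and then merged by the ffmpeg post-processor. A hedged usage sketch via the embedding API (format codes 137/139 and the watch URL are the project's usual YouTube examples, assumed here; they are not guaranteed for any given video):

from youtube_dl import YoutubeDL

# Equivalent of `youtube-dl -f 137+139 URL` on the command line:
with YoutubeDL({'format': '137+139'}) as ydl:
    ydl.add_default_info_extractors()  # register the bundled extractors
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])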
@@ -712,12 +769,15 @@ class YoutubeDL(object):
         """Process a single resolved IE result."""

         assert info_dict.get('_type', 'video') == 'video'
-        #We increment the download the download count here to match the previous behaviour.
-        self.increment_downloads()
+        max_downloads = self.params.get('max_downloads')
+        if max_downloads is not None:
+            if self._num_downloads >= int(max_downloads):
+                raise MaxDownloadsReached()

         info_dict['fulltitle'] = info_dict['title']
         if len(info_dict['title']) > 200:
-            info_dict['title'] = info_dict['title'][:197] + u'...'
+            info_dict['title'] = info_dict['title'][:197] + '...'

         # Keep for backwards compatibility
         info_dict['stitle'] = info_dict['title']
@@ -727,13 +787,10 @@ class YoutubeDL(object):

         reason = self._match_entry(info_dict)
         if reason is not None:
-            self.to_screen(u'[download] ' + reason)
+            self.to_screen('[download] ' + reason)
             return

-        max_downloads = self.params.get('max_downloads')
-        if max_downloads is not None:
-            if self._num_downloads > int(max_downloads):
-                raise MaxDownloadsReached()
+        self._num_downloads += 1

         filename = self.prepare_filename(info_dict)

@@ -744,7 +801,7 @@ class YoutubeDL(object):
             self.to_stdout(info_dict['id'])
         if self.params.get('forceurl', False):
             # For RTMP URLs, also include the playpath
-            self.to_stdout(info_dict['url'] + info_dict.get('play_path', u''))
+            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             self.to_stdout(info_dict['thumbnail'])
         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
@@ -771,37 +828,37 @@ class YoutubeDL(object):
             if dn != '' and not os.path.exists(dn):
                 os.makedirs(dn)
         except (OSError, IOError) as err:
-            self.report_error(u'unable to create directory ' + compat_str(err))
+            self.report_error('unable to create directory ' + compat_str(err))
             return

         if self.params.get('writedescription', False):
-            descfn = filename + u'.description'
+            descfn = filename + '.description'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
-                self.to_screen(u'[info] Video description is already present')
+                self.to_screen('[info] Video description is already present')
             else:
                 try:
-                    self.to_screen(u'[info] Writing video description to: ' + descfn)
+                    self.to_screen('[info] Writing video description to: ' + descfn)
                     with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                         descfile.write(info_dict['description'])
                 except (KeyError, TypeError):
-                    self.report_warning(u'There\'s no description to write.')
+                    self.report_warning('There\'s no description to write.')
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write description file ' + descfn)
+                    self.report_error('Cannot write description file ' + descfn)
                     return

         if self.params.get('writeannotations', False):
-            annofn = filename + u'.annotations.xml'
+            annofn = filename + '.annotations.xml'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
-                self.to_screen(u'[info] Video annotations are already present')
+                self.to_screen('[info] Video annotations are already present')
             else:
                 try:
-                    self.to_screen(u'[info] Writing video annotations to: ' + annofn)
+                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                     with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                         annofile.write(info_dict['annotations'])
                 except (KeyError, TypeError):
-                    self.report_warning(u'There are no annotations to write.')
+                    self.report_warning('There are no annotations to write.')
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write annotations file: ' + annofn)
+                    self.report_error('Cannot write annotations file: ' + annofn)
                     return

         subtitles_are_requested = any([self.params.get('writesubtitles', False),
@@ -819,46 +876,45 @@ class YoutubeDL(object):
                 try:
                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                     if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
-                        self.to_screen(u'[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
+                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                     else:
-                        self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
+                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                             subfile.write(sub)
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write subtitles file ' + descfn)
+                    self.report_error('Cannot write subtitles file ' + descfn)
                     return

         if self.params.get('writeinfojson', False):
-            infofn = os.path.splitext(filename)[0] + u'.info.json'
+            infofn = os.path.splitext(filename)[0] + '.info.json'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
-                self.to_screen(u'[info] Video description metadata is already present')
+                self.to_screen('[info] Video description metadata is already present')
             else:
-                self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)
+                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                 try:
-                    json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
-                    write_json_file(json_info_dict, encodeFilename(infofn))
+                    write_json_file(info_dict, encodeFilename(infofn))
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write metadata to JSON file ' + infofn)
+                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                     return

         if self.params.get('writethumbnail', False):
             if info_dict.get('thumbnail') is not None:
-                thumb_format = determine_ext(info_dict['thumbnail'], u'jpg')
-                thumb_filename = os.path.splitext(filename)[0] + u'.' + thumb_format
+                thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
+                thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
                 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
-                    self.to_screen(u'[%s] %s: Thumbnail is already present' %
+                    self.to_screen('[%s] %s: Thumbnail is already present' %
                                    (info_dict['extractor'], info_dict['id']))
                 else:
-                    self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
+                    self.to_screen('[%s] %s: Downloading thumbnail ...' %
                                    (info_dict['extractor'], info_dict['id']))
                     try:
                         uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                         with open(thumb_filename, 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
-                        self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
+                        self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                                        (info_dict['extractor'], info_dict['id'], thumb_filename))
                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                        self.report_warning(u'Unable to download thumbnail "%s": %s' %
+                        self.report_warning('Unable to download thumbnail "%s": %s' %
                                             (info_dict['thumbnail'], compat_str(err)))

         if not self.params.get('skip_download', False):
@@ -866,21 +922,49 @@ class YoutubeDL(object):
                 success = True
             else:
                 try:
-                    success = self.fd._do_download(filename, info_dict)
+                    def dl(name, info):
+                        fd = get_suitable_downloader(info)(self, self.params)
+                        for ph in self._progress_hooks:
+                            fd.add_progress_hook(ph)
+                        return fd.download(name, info)
+                    if info_dict.get('requested_formats') is not None:
+                        downloaded = []
+                        success = True
+                        merger = FFmpegMergerPP(self)
+                        if not merger._get_executable():
+                            postprocessors = []
+                            self.report_warning('You have requested multiple '
+                                'formats but ffmpeg or avconv are not installed.'
+                                ' The formats won\'t be merged')
+                        else:
+                            postprocessors = [merger]
+                        for f in info_dict['requested_formats']:
+                            new_info = dict(info_dict)
+                            new_info.update(f)
+                            fname = self.prepare_filename(new_info)
+                            fname = prepend_extension(fname, 'f%s' % f['format_id'])
+                            downloaded.append(fname)
+                            partial_success = dl(fname, new_info)
+                            success = success and partial_success
+                        info_dict['__postprocessors'] = postprocessors
+                        info_dict['__files_to_merge'] = downloaded
+                    else:
+                        # Just a single file
+                        success = dl(filename, info_dict)
                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    self.report_error(u'unable to download video data: %s' % str(err))
+                    self.report_error('unable to download video data: %s' % str(err))
                     return
                 except (OSError, IOError) as err:
                     raise UnavailableVideoError(err)
                 except (ContentTooShortError, ) as err:
-                    self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                    self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                     return

             if success:
                 try:
                     self.post_process(filename, info_dict)
                 except (PostProcessingError) as err:
-                    self.report_error(u'postprocessing: %s' % str(err))
+                    self.report_error('postprocessing: %s' % str(err))
                     return

         self.record_download_archive(info_dict)
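When 'requested_formats' is set, each half is written to an intermediate file whose name is derived with prepend_extension before FFmpegMergerPP combines them. A small sketch of that naming scheme (a minimal re-implementation for illustration only; youtube-dl ships its own prepend_extension in utils):

import os

def prepend_extension(filename, ext):
    # insert an extra extension component before the real one
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}{2}'.format(name, ext, real_ext)

print(prepend_extension('video.mp4', 'f137'))  # video.f137.mp4
print(prepend_extension('video.mp4', 'f139'))  # video.f139.mp4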
@@ -897,9 +981,9 @@ class YoutubeDL(object):
                 #It also downloads the videos
                 self.extract_info(url)
             except UnavailableVideoError:
-                self.report_error(u'unable to download video')
+                self.report_error('unable to download video')
             except MaxDownloadsReached:
-                self.to_screen(u'[info] Maximum number of downloaded files reached.')
+                self.to_screen('[info] Maximum number of downloaded files reached.')
                 raise

         return self._download_retcode
@@ -912,7 +996,7 @@ class YoutubeDL(object):
         except DownloadError:
             webpage_url = info.get('webpage_url')
             if webpage_url is not None:
-                self.report_warning(u'The info failed to download, trying with "%s"' % webpage_url)
+                self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
                 return self.download([webpage_url])
             else:
                 raise
@@ -923,7 +1007,11 @@ class YoutubeDL(object):
         info = dict(ie_info)
         info['filepath'] = filename
         keep_video = None
-        for pp in self._pps:
+        pps_chain = []
+        if ie_info.get('__postprocessors') is not None:
+            pps_chain.extend(ie_info['__postprocessors'])
+        pps_chain.extend(self._pps)
+        for pp in pps_chain:
             try:
                 keep_video_wish, new_info = pp.run(info)
                 if keep_video_wish is not None:
@@ -936,10 +1024,10 @@ class YoutubeDL(object):
                 self.report_error(e.msg)
         if keep_video is False and not self.params.get('keepvideo', False):
             try:
-                self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
+                self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
                 os.remove(encodeFilename(filename))
             except (IOError, OSError):
-                self.report_warning(u'Unable to remove downloaded video file')
+                self.report_warning('Unable to remove downloaded video file')

     def _make_archive_id(self, info_dict):
         # Future-proof against any change in case
@@ -950,7 +1038,7 @@ class YoutubeDL(object):
             extractor = info_dict.get('ie_key')  # key in a playlist
         if extractor is None:
             return None  # Incomplete video information
-        return extractor.lower() + u' ' + info_dict['id']
+        return extractor.lower() + ' ' + info_dict['id']

     def in_download_archive(self, info_dict):
         fn = self.params.get('download_archive')
@@ -978,53 +1066,72 @@ class YoutubeDL(object):
         vid_id = self._make_archive_id(info_dict)
         assert vid_id
         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
-            archive_file.write(vid_id + u'\n')
+            archive_file.write(vid_id + '\n')

     @staticmethod
     def format_resolution(format, default='unknown'):
         if format.get('vcodec') == 'none':
             return 'audio only'
-        if format.get('_resolution') is not None:
-            return format['_resolution']
+        if format.get('resolution') is not None:
+            return format['resolution']
         if format.get('height') is not None:
             if format.get('width') is not None:
-                res = u'%sx%s' % (format['width'], format['height'])
+                res = '%sx%s' % (format['width'], format['height'])
             else:
-                res = u'%sp' % format['height']
+                res = '%sp' % format['height']
+        elif format.get('width') is not None:
+            res = '?x%d' % format['width']
         else:
             res = default
         return res

     def list_formats(self, info_dict):
         def format_note(fdict):
-            res = u''
+            res = ''
+            if fdict.get('ext') in ['f4f', 'f4m']:
+                res += '(unsupported) '
             if fdict.get('format_note') is not None:
-                res += fdict['format_note'] + u' '
+                res += fdict['format_note'] + ' '
+            if fdict.get('tbr') is not None:
+                res += '%4dk ' % fdict['tbr']
+            if fdict.get('container') is not None:
+                if res:
+                    res += ', '
+                res += '%s container' % fdict['container']
             if (fdict.get('vcodec') is not None and
                     fdict.get('vcodec') != 'none'):
-                res += u'%-5s' % fdict['vcodec']
-            elif fdict.get('vbr') is not None:
-                res += u'video'
-            if fdict.get('vbr') is not None:
-                res += u'@%4dk' % fdict['vbr']
+                if res:
+                    res += ', '
+                res += fdict['vcodec']
+                if fdict.get('vbr') is not None:
+                    res += '@'
+            elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
+                res += 'video@'
+            if fdict.get('vbr') is not None:
+                res += '%4dk' % fdict['vbr']
             if fdict.get('acodec') is not None:
                 if res:
-                    res += u', '
-                res += u'%-5s' % fdict['acodec']
+                    res += ', '
+                if fdict['acodec'] == 'none':
+                    res += 'video only'
+                else:
+                    res += '%-5s' % fdict['acodec']
             elif fdict.get('abr') is not None:
                 if res:
-                    res += u', '
+                    res += ', '
                 res += 'audio'
             if fdict.get('abr') is not None:
-                res += u'@%3dk' % fdict['abr']
+                res += '@%3dk' % fdict['abr']
+            if fdict.get('asr') is not None:
+                res += ' (%5dHz)' % fdict['asr']
             if fdict.get('filesize') is not None:
                 if res:
-                    res += u', '
+                    res += ', '
                 res += format_bytes(fdict['filesize'])
             return res

         def line(format, idlen=20):
-            return ((u'%-' + compat_str(idlen + 1) + u's%-10s%-12s%s') % (
+            return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
                 format['format_id'],
                 format['ext'],
                 self.format_resolution(format),
@@ -1032,7 +1139,7 @@ class YoutubeDL(object):
             ))

         formats = info_dict.get('formats', [info_dict])
-        idlen = max(len(u'format code'),
+        idlen = max(len('format code'),
                     max(len(f['format_id']) for f in formats))
         formats_s = [line(f, idlen) for f in formats]
         if len(formats) > 1:
@@ -1040,10 +1147,10 @@ class YoutubeDL(object):
             formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'

         header_line = line({
-            'format_id': u'format code', 'ext': u'extension',
-            '_resolution': u'resolution', 'format_note': u'note'}, idlen=idlen)
-        self.to_screen(u'[info] Available formats for %s:\n%s\n%s' %
-                       (info_dict['id'], header_line, u"\n".join(formats_s)))
+            'format_id': 'format code', 'ext': 'extension',
+            'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
+        self.to_screen('[info] Available formats for %s:\n%s\n%s' %
+                       (info_dict['id'], header_line, '\n'.join(formats_s)))

     def urlopen(self, req):
         """ Start an HTTP download """
@@ -1052,7 +1159,7 @@ class YoutubeDL(object):
     def print_debug_header(self):
         if not self.params.get('verbose'):
             return
-        write_string(u'[debug] youtube-dl version ' + __version__ + u'\n')
+        write_string('[debug] youtube-dl version ' + __version__ + '\n')
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
@@ -1061,20 +1168,20 @@ class YoutubeDL(object):
             out, err = sp.communicate()
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
-                write_string(u'[debug] Git HEAD: ' + out + u'\n')
+                write_string('[debug] Git HEAD: ' + out + '\n')
         except:
             try:
                 sys.exc_clear()
             except:
                 pass
-        write_string(u'[debug] Python version %s - %s' %
-                     (platform.python_version(), platform_name()) + u'\n')
+        write_string('[debug] Python version %s - %s' %
+                     (platform.python_version(), platform_name()) + '\n')

         proxy_map = {}
         for handler in self._opener.handlers:
             if hasattr(handler, 'proxies'):
                 proxy_map.update(handler.proxies)
-        write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n')
+        write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

     def _setup_opener(self):
         timeout_val = self.params.get('socket_timeout')
@@ -1104,10 +1211,13 @@ class YoutubeDL(object):
         if 'http' in proxies and 'https' not in proxies:
             proxies['https'] = proxies['http']
         proxy_handler = compat_urllib_request.ProxyHandler(proxies)

+        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
         https_handler = make_HTTPS_handler(
-            self.params.get('nocheckcertificate', False))
+            self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
+        ydlh = YoutubeDLHandler(debuglevel=debuglevel)
         opener = compat_urllib_request.build_opener(
-            https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
+            https_handler, proxy_handler, cookie_processor, ydlh)
         # Delete the default user-agent header, which would otherwise apply in
         # cases where our custom HTTP handler doesn't come into play
         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
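format_resolution, as revised above, derives a display string from whichever fields a format dict carries: an explicit 'resolution' wins, then width/height combinations, then the default. A few illustrative calls, assuming the class is importable as shown:

from youtube_dl import YoutubeDL

print(YoutubeDL.format_resolution({'vcodec': 'none'}))               # audio only
print(YoutubeDL.format_resolution({'width': 1280, 'height': 720}))   # 1280x720
print(YoutubeDL.format_resolution({'height': 480}))                  # 480p
print(YoutubeDL.format_resolution({}))                               # unknown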
@@ -38,12 +38,21 @@ __authors__ = (
     'Takuya Tsuchida',
     'Sergey M.',
     'Michael Orlitzky',
+    'Chris Gahan',
+    'Saimadhav Heblikar',
+    'Mike Col',
+    'Oleg Prutz',
+    'pulpe',
+    'Andreas Schmitz',
+    'Michael Kaiser',
+    'Niklas Laxström',
 )

 __license__ = 'Public Domain'

 import codecs
 import getpass
+import locale
 import optparse
 import os
 import random
@@ -56,7 +65,6 @@ from .utils import (
     compat_print,
     DateRange,
     decodeOption,
-    determine_ext,
     get_term_width,
     DownloadError,
     get_cachedir,
@@ -74,11 +82,12 @@ from .FileDownloader import (
 from .extractor import gen_extractors
 from .version import __version__
 from .YoutubeDL import YoutubeDL
-from .PostProcessor import (
+from .postprocessor import (
     FFmpegMetadataPP,
     FFmpegVideoConvertor,
     FFmpegExtractAudioPP,
     FFmpegEmbedSubtitlePP,
+    XAttrMetadataPP,
 )


@@ -96,6 +105,43 @@ def parseOpts(overrideArguments=None):
             optionf.close()
         return res

+    def _readUserConf():
+        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
+        if xdg_config_home:
+            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
+            if not os.path.isfile(userConfFile):
+                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
+        else:
+            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
+            if not os.path.isfile(userConfFile):
+                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+        userConf = _readOptions(userConfFile, None)
+
+        if userConf is None:
+            appdata_dir = os.environ.get('appdata')
+            if appdata_dir:
+                userConf = _readOptions(
+                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
+                    default=None)
+                if userConf is None:
+                    userConf = _readOptions(
+                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
+                        default=None)
+
+        if userConf is None:
+            userConf = _readOptions(
+                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
+                default=None)
+        if userConf is None:
+            userConf = _readOptions(
+                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
+                default=None)
+
+        if userConf is None:
+            userConf = []
+
+        return userConf
+
     def _format_option_string(option):
         ''' ('-o', '--option') -> -o, --format METAVAR'''

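_readUserConf centralizes the user-config lookup that used to live inline in parseOpts: XDG locations first, then the Windows %appdata% files, then dotfiles in the home directory, returning an empty option list when nothing matches. An approximate sketch of the probe order (illustrative only; the real helper stops at the first file that yields options, and the %appdata% probes apply only when that environment variable is set):

import os

xdg = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
probe_order = [
    os.path.join(xdg, 'youtube-dl', 'config'),
    os.path.join(xdg, 'youtube-dl.conf'),
    # then, on Windows: %appdata%/youtube-dl/config and %appdata%/youtube-dl/config.txt
    os.path.expanduser('~/youtube-dl.conf'),
    os.path.expanduser('~/youtube-dl.conf.txt'),
]
print(probe_order)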
@@ -186,16 +232,23 @@ def parseOpts(overrideArguments=None):
     general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
     general.add_option(
         '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
-        help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .')
+        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
     general.add_option(
         '--no-cache-dir', action='store_const', const=None, dest='cachedir',
         help='Disable filesystem caching')
     general.add_option(
         '--socket-timeout', dest='socket_timeout',
-        type=float, default=None, help=optparse.SUPPRESS_HELP)
+        type=float, default=None, help=u'Time to wait before giving up, in seconds')
     general.add_option(
         '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires fribidi executable in PATH')
+        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
+    general.add_option('--default-search',
+            dest='default_search', metavar='PREFIX',
+            help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
+    general.add_option(
+        '--ignore-config',
+        action='store_true',
+        help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')


     selection.add_option(
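--default-search makes bare queries usable where a URL is expected by prepending the given prefix; the validation added later in _real_main insists the value contains a colon. A hedged API-level sketch of what the help text describes (the search prefix and query are the ones from the help string):

from youtube_dl import YoutubeDL

# Roughly equivalent to: youtube-dl --default-search gvsearch2: "large apple",
# i.e. the unqualified argument is treated as 'gvsearch2:large apple'.
with YoutubeDL({'default_search': 'gvsearch2:'}) as ydl:
    ydl.add_default_info_extractors()
    ydl.download(['large apple'])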
@@ -214,8 +267,12 @@ def parseOpts(overrideArguments=None):
     selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
     selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
     selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
-    selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
-    selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
+    selection.add_option(
+        '--datebefore', metavar='DATE', dest='datebefore', default=None,
+        help='download only videos uploaded on or before this date (i.e. inclusive)')
+    selection.add_option(
+        '--dateafter', metavar='DATE', dest='dateafter', default=None,
+        help='download only videos uploaded on or after this date (i.e. inclusive)')
     selection.add_option(
         '--min-views', metavar='COUNT', dest='min_views',
         default=None, type=int,
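The reworded help strings make the boundary semantics explicit: both --datebefore and --dateafter are inclusive. The DateRange that _real_main builds from these options behaves accordingly; a quick sketch:

from youtube_dl.utils import DateRange

rng = DateRange('20140101', '20140131')
print('20140101' in rng)  # True - the start day itself is accepted
print('20140131' in rng)  # True - so is the end day
print('20140201' in rng)  # False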
@@ -231,7 +288,14 @@ def parseOpts(overrideArguments=None):
     selection.add_option('--download-archive', metavar='FILE',
                          dest='download_archive',
                          help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
+    selection.add_option(
+        '--include-ads', dest='include_ads',
+        action='store_true',
+        help='Download advertisements as well (experimental)')
+    selection.add_option(
+        '--youtube-include-dash-manifest', action='store_true',
+        dest='youtube_include_dash_manifest', default=False,
+        help='Try to download the DASH manifest on YouTube videos (experimental)')

     authentication.add_option('-u', '--username',
             dest='username', metavar='USERNAME', help='account username')
@@ -240,12 +304,12 @@ def parseOpts(overrideArguments=None):
     authentication.add_option('-n', '--netrc',
             action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
     authentication.add_option('--video-password',
-            dest='videopassword', metavar='PASSWORD', help='video password (vimeo only)')
+            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')


     video_format.add_option('-f', '--format',
-            action='store', dest='format', metavar='FORMAT', default='best',
-            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
+            action='store', dest='format', metavar='FORMAT', default=None,
+            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestaudio", "worst", and "worstaudio". By default, youtube-dl will pick the best quality.')
     video_format.add_option('--all-formats',
             action='store_const', dest='format', help='download all available video formats', const='all')
     video_format.add_option('--prefer-free-formats',
@@ -253,7 +317,7 @@ def parseOpts(overrideArguments=None):
     video_format.add_option('--max-quality',
             action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
-            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
+            action='store_true', dest='listformats', help='list all available formats')

     subtitles.add_option('--write-sub', '--write-srt',
             action='store_true', dest='writesubtitles',
@@ -334,6 +398,9 @@ def parseOpts(overrideArguments=None):
     verbosity.add_option('--youtube-print-sig-code',
             action='store_true', dest='youtube_print_sig_code', default=False,
             help=optparse.SUPPRESS_HELP)
+    verbosity.add_option('--print-traffic',
+            dest='debug_printtraffic', action='store_true', default=False,
+            help='Display sent and read HTTP traffic')


     filesystem.add_option('-t', '--title',
@@ -369,7 +436,7 @@ def parseOpts(overrideArguments=None):
             dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('--load-info',
             dest='load_info_filename', metavar='FILE',
-            help='json file containing the video information (created with the "--write-json" option')
+            help='json file containing the video information (created with the "--write-json" option)')
     filesystem.add_option('-w', '--no-overwrites',
             action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
     filesystem.add_option('-c', '--continue',
@@ -413,7 +480,13 @@ def parseOpts(overrideArguments=None):
     postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
             help='embed subtitles in the video (only for mp4 videos)')
     postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
-            help='add metadata to the files')
+            help='write metadata to the video file')
+    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
+            help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
+    postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
+        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
+    postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
+        help='Prefer ffmpeg over avconv for running the postprocessors')


     parser.add_option_group(general)
@@ -431,49 +504,25 @@ def parseOpts(overrideArguments=None):
         if opts.verbose:
             write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
     else:
-        systemConf = _readOptions('/etc/youtube-dl.conf')
-
-        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
-        if xdg_config_home:
-            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
-            if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
-        else:
-            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
-            if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
-        userConf = _readOptions(userConfFile, None)
-
-        if userConf is None:
-            appdata_dir = os.environ.get('appdata')
-            if appdata_dir:
-                userConf = _readOptions(
-                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
-                    default=None)
-                if userConf is None:
-                    userConf = _readOptions(
-                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
-                        default=None)
-
-        if userConf is None:
-            userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
-                default=None)
-        if userConf is None:
-            userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
-                default=None)
-
-        if userConf is None:
-            userConf = []
-
         commandLineConf = sys.argv[1:]
+        if '--ignore-config' in commandLineConf:
+            systemConf = []
+            userConf = []
+        else:
+            systemConf = _readOptions('/etc/youtube-dl.conf')
+            if '--ignore-config' in systemConf:
+                userConf = []
+            else:
+                userConf = _readUserConf()
         argv = systemConf + userConf + commandLineConf

         opts, args = parser.parse_args(argv)
         if opts.verbose:
             write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
             write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
             write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+            write_string(u'[debug] Encodings: locale %r, fs %r, out %r, pref: %r\n' %
+                         (locale.getpreferredencoding(), sys.getfilesystemencoding(), sys.stdout.encoding, preferredencoding()))

     return parser, opts, args

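With the rework above, --ignore-config can short-circuit configuration loading either from the command line (skip system and user config) or from /etc/youtube-dl.conf (skip only the user config). Otherwise precedence is unchanged: optparse sees system config, then user config, then the command line, so later sources override earlier ones. A toy illustration with stand-in values (the real lists come from _readOptions, _readUserConf and sys.argv):

systemConf = ['--restrict-filenames']
userConf = ['-f', 'best']
commandLineConf = ['-f', '22']
argv = systemConf + userConf + commandLineConf
print(argv)  # ['--restrict-filenames', '-f', 'best', '-f', '22'] -> '-f 22' wins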
@@ -518,6 +567,8 @@ def _real_main(argv=None):
         sys.exit(u'ERROR: batch file could not be read')
     all_urls = batchurls + args
     all_urls = [url.strip() for url in all_urls]
+    _enc = preferredencoding()
+    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]

     extractors = gen_extractors()

@@ -525,7 +576,6 @@ def _real_main(argv=None):
         for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
             matchedUrls = [url for url in all_urls if ie.suitable(url)]
-            all_urls = [url for url in all_urls if url not in matchedUrls]
             for mu in matchedUrls:
                 compat_print(u'  ' + mu)
         sys.exit(0)
@@ -598,6 +648,12 @@ def _real_main(argv=None):
             date = DateRange.day(opts.date)
         else:
             date = DateRange(opts.dateafter, opts.datebefore)
+    if opts.default_search not in ('auto', None) and ':' not in opts.default_search:
+        parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
+
+    # Do not download videos when there are audio-only formats
+    if opts.extractaudio and not opts.keepvideo and opts.format is None:
+        opts.format = 'bestaudio/best'

     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
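The new audio-extraction default means `youtube-dl -x URL` without -k no longer downloads a full video only to throw the picture away: if the user did not pick a format, 'bestaudio/best' prefers an audio-only stream and falls back to the best combined one. The rule itself, restated as a standalone snippet (stand-in options object; the real one comes from optparse):

class opts:
    extractaudio, keepvideo, format = True, False, None

if opts.extractaudio and not opts.keepvideo and opts.format is None:
    opts.format = 'bestaudio/best'
print(opts.format)  # bestaudio/best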
@@ -622,6 +678,7 @@ def _real_main(argv=None):
                          u' template'.format(outtmpl))

     any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
+    download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive

     ydl_opts = {
         'usenetrc': opts.usenetrc,
@@ -689,12 +746,17 @@ def _real_main(argv=None):
         'cachedir': opts.cachedir,
         'youtube_print_sig_code': opts.youtube_print_sig_code,
         'age_limit': opts.age_limit,
-        'download_archive': opts.download_archive,
+        'download_archive': download_archive_fn,
         'cookiefile': opts.cookiefile,
         'nocheckcertificate': opts.no_check_certificate,
         'proxy': opts.proxy,
         'socket_timeout': opts.socket_timeout,
         'bidi_workaround': opts.bidi_workaround,
+        'debug_printtraffic': opts.debug_printtraffic,
+        'prefer_ffmpeg': opts.prefer_ffmpeg,
+        'include_ads': opts.include_ads,
+        'default_search': opts.default_search,
+        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
     }

     with YoutubeDL(ydl_opts) as ydl:
@@ -711,6 +773,8 @@ def _real_main(argv=None):
             ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
         if opts.embedsubtitles:
             ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
+        if opts.xattrs:
+            ydl.add_post_processor(XAttrMetadataPP())

         # Update version
         if opts.update_self:
@@ -1,4 +1,4 @@
-__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_decrypt_text']
+__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']

 import base64
 from math import ceil
@@ -32,6 +32,31 @@ def aes_ctr_decrypt(data, key, counter):

     return decrypted_data

+def aes_cbc_decrypt(data, key, iv):
+    """
+    Decrypt with aes in CBC mode
+
+    @param {int[]} data        cipher
+    @param {int[]} key         16/24/32-Byte cipher key
+    @param {int[]} iv          16-Byte IV
+    @returns {int[]}           decrypted data
+    """
+    expanded_key = key_expansion(key)
+    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+
+    decrypted_data=[]
+    previous_cipher_block = iv
+    for i in range(block_count):
+        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+
+        decrypted_block = aes_decrypt(block, expanded_key)
+        decrypted_data += xor(decrypted_block, previous_cipher_block)
+        previous_cipher_block = block
+    decrypted_data = decrypted_data[:len(data)]
+
+    return decrypted_data
+
 def key_expansion(data):
     """
     Generate key schedule
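aes_cbc_decrypt follows the module's convention of operating on lists of integer byte values rather than bytes objects, and it truncates the output to the input length so short final blocks round-trip. A minimal interface sketch with dummy key, IV and ciphertext (hypothetical values, for shape only):

from youtube_dl.aes import aes_cbc_decrypt

key = list(range(16))     # 16-byte AES-128 key, dummy values
iv = [0] * 16             # 16-byte IV, dummy values
ciphertext = [0] * 32     # two blocks of dummy cipher input
plaintext = aes_cbc_decrypt(ciphertext, key, iv)
print(len(plaintext))     # 32 - output length matches the input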
@@ -86,6 +111,26 @@ def aes_encrypt(data, expanded_key):

     return data

+def aes_decrypt(data, expanded_key):
+    """
+    Decrypt one block with aes
+
+    @param {int[]} data          16-Byte cipher
+    @param {int[]} expanded_key  176/208/240-Byte expanded key
+    @returns {int[]}             16-Byte state
+    """
+    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
+
+    for i in range(rounds, 0, -1):
+        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        if i != rounds:
+            data = mix_columns_inv(data)
+        data = shift_rows_inv(data)
+        data = sub_bytes_inv(data)
+    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
+
+    return data
+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
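aes_decrypt mirrors aes_encrypt round for round: add the round key, undo MixColumns on all but the first processed round, then undo ShiftRows and SubBytes, finishing with the initial round key. That makes a one-block round-trip a quick sanity check (aes_decrypt is not listed in __all__, but a direct import still works):

from youtube_dl.aes import aes_encrypt, aes_decrypt, key_expansion

key = list(range(16))      # dummy 16-byte key
block = list(range(16))    # dummy 16-byte plaintext block
expanded = key_expansion(key)
assert aes_decrypt(aes_encrypt(block, expanded), expanded) == block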
@@ -139,14 +184,69 @@ SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B,
         0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
         0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
         0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
-MIX_COLUMN_MATRIX = ((2,3,1,1),
-                     (1,2,3,1),
-                     (1,1,2,3),
-                     (3,1,1,2))
+SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+            0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+            0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+            0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+            0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+            0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+            0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+            0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+            0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+            0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+            0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+            0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+            0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+            0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+            0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+            0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
+MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
+                     (0x1,0x2,0x3,0x1),
+                     (0x1,0x1,0x2,0x3),
+                     (0x3,0x1,0x1,0x2))
+MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
+                         (0x9,0xE,0xB,0xD),
+                         (0xD,0x9,0xE,0xB),
+                         (0xB,0xD,0x9,0xE))
+RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
+                      0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
+                      0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
+                      0x53, 0xF5, 0x04, 0x0C, 0x14, 0x3C, 0x44, 0xCC, 0x4F, 0xD1, 0x68, 0xB8, 0xD3, 0x6E, 0xB2, 0xCD,
+                      0x4C, 0xD4, 0x67, 0xA9, 0xE0, 0x3B, 0x4D, 0xD7, 0x62, 0xA6, 0xF1, 0x08, 0x18, 0x28, 0x78, 0x88,
+                      0x83, 0x9E, 0xB9, 0xD0, 0x6B, 0xBD, 0xDC, 0x7F, 0x81, 0x98, 0xB3, 0xCE, 0x49, 0xDB, 0x76, 0x9A,
+                      0xB5, 0xC4, 0x57, 0xF9, 0x10, 0x30, 0x50, 0xF0, 0x0B, 0x1D, 0x27, 0x69, 0xBB, 0xD6, 0x61, 0xA3,
+                      0xFE, 0x19, 0x2B, 0x7D, 0x87, 0x92, 0xAD, 0xEC, 0x2F, 0x71, 0x93, 0xAE, 0xE9, 0x20, 0x60, 0xA0,
+                      0xFB, 0x16, 0x3A, 0x4E, 0xD2, 0x6D, 0xB7, 0xC2, 0x5D, 0xE7, 0x32, 0x56, 0xFA, 0x15, 0x3F, 0x41,
+                      0xC3, 0x5E, 0xE2, 0x3D, 0x47, 0xC9, 0x40, 0xC0, 0x5B, 0xED, 0x2C, 0x74, 0x9C, 0xBF, 0xDA, 0x75,
+                      0x9F, 0xBA, 0xD5, 0x64, 0xAC, 0xEF, 0x2A, 0x7E, 0x82, 0x9D, 0xBC, 0xDF, 0x7A, 0x8E, 0x89, 0x80,
+                      0x9B, 0xB6, 0xC1, 0x58, 0xE8, 0x23, 0x65, 0xAF, 0xEA, 0x25, 0x6F, 0xB1, 0xC8, 0x43, 0xC5, 0x54,
+                      0xFC, 0x1F, 0x21, 0x63, 0xA5, 0xF4, 0x07, 0x09, 0x1B, 0x2D, 0x77, 0x99, 0xB0, 0xCB, 0x46, 0xCA,
+                      0x45, 0xCF, 0x4A, 0xDE, 0x79, 0x8B, 0x86, 0x91, 0xA8, 0xE3, 0x3E, 0x42, 0xC6, 0x51, 0xF3, 0x0E,
+                      0x12, 0x36, 0x5A, 0xEE, 0x29, 0x7B, 0x8D, 0x8C, 0x8F, 0x8A, 0x85, 0x94, 0xA7, 0xF2, 0x0D, 0x17,
+                      0x39, 0x4B, 0xDD, 0x7C, 0x84, 0x97, 0xA2, 0xFD, 0x1C, 0x24, 0x6C, 0xB4, 0xC7, 0x52, 0xF6, 0x01)
+RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,
+                      0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,
+                      0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
+                      0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,
+                      0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,
+                      0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
+                      0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,
+                      0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,
+                      0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
+                      0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,
+                      0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,
+                      0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
+                      0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,
+                      0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,
|
||||||
|
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
|
||||||
|
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
|
||||||
|
|
||||||
def sub_bytes(data):
|
def sub_bytes(data):
|
||||||
return [SBOX[x] for x in data]
|
return [SBOX[x] for x in data]
|
||||||
|
|
||||||
|
def sub_bytes_inv(data):
|
||||||
|
return [SBOX_INV[x] for x in data]
|
||||||
|
|
||||||
def rotate(data):
|
def rotate(data):
|
||||||
return data[1:] + [data[0]]
|
return data[1:] + [data[0]]
|
||||||
|
|
||||||
@@ -160,30 +260,31 @@ def key_schedule_core(data, rcon_iteration):
|
|||||||
def xor(data1, data2):
|
def xor(data1, data2):
|
||||||
return [x^y for x, y in zip(data1, data2)]
|
return [x^y for x, y in zip(data1, data2)]
|
||||||
|
|
||||||
def mix_column(data):
|
def rijndael_mul(a, b):
|
||||||
|
if(a==0 or b==0):
|
||||||
|
return 0
|
||||||
|
return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
|
||||||
|
|
||||||
|
def mix_column(data, matrix):
|
||||||
data_mixed = []
|
data_mixed = []
|
||||||
for row in range(4):
|
for row in range(4):
|
||||||
mixed = 0
|
mixed = 0
|
||||||
for column in range(4):
|
for column in range(4):
|
||||||
addend = data[column]
|
# xor is (+) and (-)
|
||||||
if MIX_COLUMN_MATRIX[row][column] in (2,3):
|
mixed ^= rijndael_mul(data[column], matrix[row][column])
|
||||||
addend <<= 1
|
|
||||||
if addend > 0xff:
|
|
||||||
addend &= 0xff
|
|
||||||
addend ^= 0x1b
|
|
||||||
if MIX_COLUMN_MATRIX[row][column] == 3:
|
|
||||||
addend ^= data[column]
|
|
||||||
mixed ^= addend & 0xff
|
|
||||||
data_mixed.append(mixed)
|
data_mixed.append(mixed)
|
||||||
return data_mixed
|
return data_mixed
|
||||||
|
|
||||||
def mix_columns(data):
|
def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
|
||||||
data_mixed = []
|
data_mixed = []
|
||||||
for i in range(4):
|
for i in range(4):
|
||||||
column = data[i*4 : (i+1)*4]
|
column = data[i*4 : (i+1)*4]
|
||||||
data_mixed += mix_column(column)
|
data_mixed += mix_column(column, matrix)
|
||||||
return data_mixed
|
return data_mixed
|
||||||
|
|
||||||
|
def mix_columns_inv(data):
|
||||||
|
return mix_columns(data, MIX_COLUMN_MATRIX_INV)
|
||||||
|
|
||||||
def shift_rows(data):
|
def shift_rows(data):
|
||||||
data_shifted = []
|
data_shifted = []
|
||||||
for column in range(4):
|
for column in range(4):
|
||||||
@@ -191,6 +292,13 @@ def shift_rows(data):
|
|||||||
data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
|
data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
|
||||||
return data_shifted
|
return data_shifted
|
||||||
|
|
||||||
|
def shift_rows_inv(data):
|
||||||
|
data_shifted = []
|
||||||
|
for column in range(4):
|
||||||
|
for row in range(4):
|
||||||
|
data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
|
||||||
|
return data_shifted
|
||||||
|
|
||||||
def inc(data):
|
def inc(data):
|
||||||
data = data[:] # copy
|
data = data[:] # copy
|
||||||
for i in range(len(data)-1,-1,-1):
|
for i in range(len(data)-1,-1,-1):
|
||||||
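The patch above swaps the inline xtime doubling in mix_column() for table-based multiplication in GF(2^8). A minimal standalone sketch (not part of the diff; every name below is local to the sketch) that rebuilds the exp/log tables from the generator 0x03 and checks the table lookup against direct carry-less multiplication:

def gf_mul_direct(a, b):
    # Russian-peasant multiplication modulo the AES polynomial x^8+x^4+x^3+x+1
    result = 0
    for _ in range(8):
        if b & 1:
            result ^= a
        carry = a & 0x80
        a = (a << 1) & 0xFF
        if carry:
            a ^= 0x1B
        b >>= 1
    return result

# Rebuild exp/log tables over the generator 0x03, as the RIJNDAEL_EXP_TABLE /
# RIJNDAEL_LOG_TABLE constants added above do.
exp_table, log_table = [0] * 256, [0] * 256
x = 1
for i in range(255):
    exp_table[i] = x
    log_table[x] = i
    x = gf_mul_direct(x, 0x03)

def table_mul(a, b):
    if a == 0 or b == 0:
        return 0
    return exp_table[(log_table[a] + log_table[b]) % 0xFF]

assert all(table_mul(a, b) == gf_mul_direct(a, b)
           for a in range(256) for b in range(256))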
youtube_dl/downloader/__init__.py (new file, 29 lines)
@@ -0,0 +1,29 @@
from __future__ import unicode_literals

from .common import FileDownloader
from .hls import HlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
from .f4m import F4mFD

from ..utils import (
    determine_ext,
)


def get_suitable_downloader(info_dict):
    """Get the downloader class that can handle the info dict."""
    url = info_dict['url']
    protocol = info_dict.get('protocol')

    if url.startswith('rtmp'):
        return RtmpFD
    if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
        return HlsFD
    if url.startswith('mms') or url.startswith('rtsp'):
        return MplayerFD
    if determine_ext(url) == 'f4m':
        return F4mFD
    else:
        return HttpFD
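For reference, a usage sketch of the new dispatch function (the URLs are made up; the expected classes assume determine_ext() resolves the extension from the URL path):

from youtube_dl.downloader import get_suitable_downloader

get_suitable_downloader({'url': 'rtmp://example.com/live'})         # -> RtmpFD
get_suitable_downloader({'url': 'http://example.com/master.m3u8'})  # -> HlsFD
get_suitable_downloader({'url': 'http://example.com/manifest.f4m'}) # -> F4mFD
get_suitable_downloader({'url': 'http://example.com/video.mp4'})    # -> HttpFD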
youtube_dl/downloader/common.py (new file, 316 lines)
@@ -0,0 +1,316 @@
import os
import re
import sys
import time

from ..utils import (
    encodeFilename,
    timeconvert,
    format_bytes,
)


class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible for downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, each receives a dictionary of
    options instead.

    Available options:

    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    ratelimit:          Download speed limit, in bytes/sec.
    retries:            Number of times to retry for HTTP error 5xx
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
    noprogress:         Do not print the progress bar.
    logtostderr:        Log messages to stderr instead of stdout.
    consoletitle:       Display progress in console window's titlebar.
    nopart:             Do not use temporary .part files.
    updatetime:         Use the Last-modified header to set output file timestamps.
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size

    Subclasses of this one must re-define the real_download method.
    """

    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params

    @staticmethod
    def format_seconds(seconds):
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        if total is None:
            return None
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        self.ydl.to_screen(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error(u'unable to rename file: %s' % str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except:
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        fullmsg = u'[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += u' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = u'\r'
            else:
                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title(u'youtube-dl ' + msg)

    def report_progress(self, percent, data_len_str, speed, eta):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        if eta is not None:
            eta_str = self.format_eta(eta)
        else:
            eta_str = 'Unknown ETA'
        if percent is not None:
            percent_str = self.format_percent(percent)
        else:
            percent_str = 'Unknown %'
        speed_str = self.format_speed(speed)

        msg = (u'%s of %s at %s ETA %s' %
               (percent_str, data_len_str, speed_str, eta_str))
        self._report_progress_status(msg)

    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
        if self.params.get('noprogress', False):
            return
        downloaded_str = format_bytes(downloaded_data_len)
        speed_str = self.format_speed(speed)
        elapsed_str = FileDownloader.format_seconds(elapsed)
        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
        self._report_progress_status(msg)

    def report_finish(self, data_len_str, tot_time):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self._report_progress_status(
                (u'100%% of %s in %s' %
                 (data_len_str, self.format_seconds(tot_time))),
                is_last_line=True)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError(u'This method must be implemented by subclasses')

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        """ ph gets called on download progress, with a dictionary with the entries
        * filename: The final filename
        * status: One of "downloading" and "finished"

        It can also have some of the following entries:

        * downloaded_bytes: Bytes on disks
        * total_bytes: Total bytes, None if unknown
        * tmpfilename: The filename we're currently writing to
        * eta: The estimated time in seconds, None if unknown
        * speed: The download speed in bytes/second, None if unknown

        Hooks are guaranteed to be called at least once (with status "finished")
        if the download is successful.
        """
        self._progress_hooks.append(ph)
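The hook contract documented in add_progress_hook() above can be exercised with a small sketch like the following (illustrative only; fd stands for any FileDownloader instance):

def log_progress(status):
    # status['status'] is 'downloading' or 'finished'; other keys are optional
    if status['status'] == 'downloading':
        print('%s: %s/%s bytes, ETA %s s' % (
            status['filename'],
            status.get('downloaded_bytes'),
            status.get('total_bytes'),   # may be None if unknown
            status.get('eta'),           # may be None if unknown
        ))
    elif status['status'] == 'finished':
        print('%s: done (%s bytes)' % (
            status['filename'], status.get('total_bytes')))

# fd.add_progress_hook(log_progress)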
youtube_dl/downloader/f4m.py (new file, 315 lines)
@@ -0,0 +1,315 @@
from __future__ import unicode_literals

import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree

from .common import FileDownloader
from .http import HttpFD
from ..utils import (
    struct_pack,
    struct_unpack,
    compat_urllib_request,
    compat_urlparse,
    format_bytes,
    encodeFilename,
    sanitize_open,
)


class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size-header_end)

    def read_asrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }

    def read_abst(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # BootstrapinfoVersion
        bootstrap_info_version = self.read_unsigned_int()
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()
        # MovieIdentifier
        movie_identifier = self.read_string()
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(server_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()


def read_bootstrap_info(bootstrap_bytes):
    return FlvReader(bootstrap_bytes).read_bootstrap_info()


def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    res = []
    segment_run_table = boot_info['segments'][0]
    # I've only found videos with one segment
    segment_run_entry = segment_run_table['segment_run'][0]
    n_frags = segment_run_entry[1]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
        res.append((1, frag_number))
    return res


def write_flv_header(stream, metadata):
    """Writes the FLV header and the metadata to stream"""
    # FLV header
    stream.write(b'FLV\x01')
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    # FLV File body
    stream.write(b'\x00\x00\x00\x00')
    # FLVTAG
    # Script data
    stream.write(b'\x12')
    # Size of the metadata with 3 bytes
    stream.write(struct_pack('!L', len(metadata))[1:])
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # Magic numbers extracted from the output files produced by AdobeHDS.php
    # (https://github.com/K-S-V/Scripts)
    stream.write(b'\x00\x00\x01\x73')


def _add_ns(prop):
    return '{http://ns.adobe.com/f4m/1.0}%s' % prop


class HttpQuietDownloader(HttpFD):
    def to_screen(self, *args, **kargs):
        pass


class F4mFD(FileDownloader):
    """
    A downloader for f4m manifests or AdobeHDS.
    """

    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        self.to_screen('[download] Downloading f4m manifest')
        manifest = self.ydl.urlopen(man_url).read()
        self.report_destination(filename)
        http_dl = HttpQuietDownloader(self.ydl,
            {
                'continuedl': True,
                'quiet': True,
                'noprogress': True,
                'test': self.params.get('test', False),
            })

        doc = etree.fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
        formats = sorted(formats, key=lambda f: f[0])
        rate, media = formats[-1]
        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
        metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
        boot_info = read_bootstrap_info(bootstrap)
        fragments_list = build_fragments_list(boot_info)
        if self.params.get('test', False):
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)

        tmpfilename = self.temp_name(filename)
        (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
        write_flv_header(dest_stream, metadata)

        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'downloaded_bytes': 0,
            'frag_counter': 0,
        }
        start = time.time()

        def frag_progress_hook(status):
            frag_total_bytes = status.get('total_bytes', 0)
            estimated_size = (state['downloaded_bytes'] +
                (total_frags - state['frag_counter']) * frag_total_bytes)
            if status['status'] == 'finished':
                state['downloaded_bytes'] += frag_total_bytes
                state['frag_counter'] += 1
                progress = self.calc_percent(state['frag_counter'], total_frags)
                byte_counter = state['downloaded_bytes']
            else:
                frag_downloaded_bytes = status['downloaded_bytes']
                byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                frag_progress = self.calc_percent(frag_downloaded_bytes,
                    frag_total_bytes)
                progress = self.calc_percent(state['frag_counter'], total_frags)
                progress += frag_progress / float(total_frags)

            eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
            self.report_progress(progress, format_bytes(estimated_size),
                status.get('speed'), eta)
        http_dl.add_progress_hook(frag_progress_hook)

        frags_filenames = []
        for (seg_i, frag_i) in fragments_list:
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            url = base_url + name
            frag_filename = '%s-%s' % (tmpfilename, name)
            success = http_dl.download(frag_filename, {'url': url})
            if not success:
                return False
            with open(frag_filename, 'rb') as down:
                down_data = down.read()
                reader = FlvReader(down_data)
                while True:
                    _, box_type, box_data = reader.read_box_info()
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
            frags_filenames.append(frag_filename)

        self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)

        self.try_rename(tmpfilename, filename)
        for frag_file in frags_filenames:
            os.remove(frag_file)

        fsize = os.path.getsize(encodeFilename(filename))
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
        })

        return True
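FlvReader.read_box_info() above walks the F4V box layout: a 4-byte big-endian size covering the whole box, a 4-byte type, then the payload, with a size of 1 escaping to a 64-bit length field. A standalone sketch (not part of the diff) that round-trips one synthetic box through the same layout:

import io
import struct

def make_box(box_type, payload):
    # size field counts the whole box: 8 header bytes plus the payload
    return struct.pack('!I', 8 + len(payload)) + box_type + payload

def read_box(stream):
    size = struct.unpack('!I', stream.read(4))[0]
    box_type = stream.read(4)
    header_end = 8
    if size == 1:  # 64-bit size escape, as in FlvReader.read_box_info()
        size = struct.unpack('!Q', stream.read(8))[0]
        header_end = 16
    return box_type, stream.read(size - header_end)

buf = io.BytesIO(make_box(b'mdat', b'\x00' * 16))
box_type, box_data = read_box(buf)
assert box_type == b'mdat' and len(box_data) == 16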
youtube_dl/downloader/hls.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import os
import subprocess

from .common import FileDownloader
from ..utils import (
    encodeFilename,
)


class HlsFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
                '-bsf:a', 'aac_adtstoasc', tmpfilename]

        for program in ['avconv', 'ffmpeg']:
            try:
                subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
                break
            except (OSError, IOError):
                pass
        else:
            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
        cmd = [program] + args

        retval = subprocess.call(cmd)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'ffmpeg exited with code %d' % retval)
            return False
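The avconv/ffmpeg probe above leans on Python's for/else: the else arm runs only when the loop finished without hitting break, i.e. when no program answered the '-version' call. A minimal illustration with made-up names:

for candidate in ['tool_a', 'tool_b']:
    if candidate == 'tool_b':   # pretend only tool_b is installed
        break
else:
    print('no tool found')      # skipped here, because the loop broke out
print(candidate)                # -> 'tool_b'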
youtube_dl/downloader/http.py (new file, 186 lines)
@@ -0,0 +1,186 @@
import os
import time

from .common import FileDownloader
from ..utils import (
    compat_urllib_request,
    compat_urllib_error,
    ContentTooShortError,

    encodeFilename,
    sanitize_open,
    format_bytes,
)


class HttpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        if self.params.get('test', False):
            request.add_header('Range', 'bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one on the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len = self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                eta = percent = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        stream.close()
        self.report_finish(data_len_str, (time.time() - start))
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True
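A sketch of the resume handshake real_download() performs (illustrative; the URL and size are made up): an existing .part file of N bytes becomes a 'Range: bytes=N-' request header, and an HTTP 416 reply means the range cannot be satisfied, which the code then treats as a possibly-complete file:

try:
    from urllib.request import Request, urlopen   # Python 3
except ImportError:
    from urllib2 import Request, urlopen          # Python 2

resume_len = 12345  # pretend the temp file already holds this many bytes
req = Request('http://example.com/video.mp4')
req.add_header('Range', 'bytes=%d-' % resume_len)
# urlopen(req) would now either resume at byte 12345, or raise an
# HTTPError with code 416 if the server cannot satisfy the range.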
youtube_dl/downloader/mplayer.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import os
import subprocess

from .common import FileDownloader
from ..utils import (
    encodeFilename,
)


class MplayerFD(FileDownloader):
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
        # Check for mplayer first
        try:
            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
            return False

        # Download using mplayer.
        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'mplayer exited with code %d' % retval)
            return False
youtube_dl/downloader/rtmp.py (new file, 184 lines)
@@ -0,0 +1,184 @@
import os
import re
import subprocess
import sys
import time

from .common import FileDownloader
from ..utils import (
    encodeFilename,
    format_bytes,
)


class RtmpFD(FileDownloader):
    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr
                line = u''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1))*1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    data_len_str = u'~' + format_bytes(data_len)
                    self.report_progress(percent, data_len_str, speed, eta)
                    cursor_in_new_line = False
                    self._hook_progress({
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'status': 'downloading',
                        'eta': eta,
                        'speed': speed,
                    })
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1))*1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'speed': speed,
                        })
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen(u'')
                        cursor_in_new_line = True
                        self.to_screen(u'[rtmpdump] '+line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen(u'')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        app = info_dict.get('app', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        flash_version = info_dict.get('flash_version', None)
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', url]
        if test:
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
        if conn:
            basic_args += ['--conn', conn]
        args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
            # on Python 2.x
            # See http://stackoverflow.com/a/9951851/35070
            subprocess_encoding = sys.getfilesystemencoding()
            args = [a.encode(subprocess_encoding, 'ignore') for a in args]
        else:
            subprocess_encoding = None

        if self.params.get('verbose', False):
            if subprocess_encoding:
                str_args = [
                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                    for a in args]
            else:
                str_args = args
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
            except ImportError:
                shell_quote = repr
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args))

        retval = run_rtmpdump(args)

        while (retval == 2 or retval == 1) and not test:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == 2 and cursize > 1024:
                self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = 0
                break
        if retval == 0 or (test and retval == 2):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'rtmpdump exited with code %d' % retval)
            return False
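The progress parsing in run_rtmpdump() hinges on one regular expression over rtmpdump's stderr. A standalone check (the sample line is made up but follows the format the regex expects):

import re

line = '3412.117 kB / 45.80 sec (97.5%)'
mobj = re.search(
    r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)',
    line)
downloaded_bytes = int(float(mobj.group(1)) * 1024)
percent = float(mobj.group(2))
assert downloaded_bytes == 3494007 and percent == 97.5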
youtube_dl/extractor/__init__.py
@@ -1,6 +1,8 @@
-from .appletrailers import AppleTrailersIE
+from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
 from .anitube import AnitubeIE
+from .aparat import AparatIE
+from .appletrailers import AppleTrailersIE
 from .archiveorg import ArchiveOrgIE
 from .ard import ARDIE
 from .arte import (
@@ -13,6 +15,7 @@ from .arte import (
 from .auengine import AUEngineIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
+from .bbccouk import BBCCoUkIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
@@ -23,14 +26,21 @@ from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE
 from .cbs import CBSIE
 from .channel9 import Channel9IE
+from .chilloutzone import ChilloutzoneIE
 from .cinemassacre import CinemassacreIE
 from .clipfish import ClipfishIE
+from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
-from .cnn import CNNIE
+from .cmt import CMTIE
+from .cnn import (
+    CNNIE,
+    CNNBlogsIE,
+)
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .condenast import CondeNastIE
 from .criterion import CriterionIE
+from .crunchyroll import CrunchyrollIE
 from .cspan import CSpanIE
 from .d8 import D8IE
 from .dailymotion import (
@@ -43,27 +53,37 @@ from .depositfiles import DepositFilesIE
 from .dotsub import DotsubIE
 from .dreisat import DreiSatIE
 from .defense import DefenseGouvFrIE
+from .discovery import DiscoveryIE
+from .dropbox import DropboxIE
 from .ebaumsworld import EbaumsWorldIE
 from .ehow import EHowIE
 from .eighttracks import EightTracksIE
 from .eitb import EitbIE
+from .elpais import ElPaisIE
 from .escapist import EscapistIE
+from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE
+from .firstpost import FirstpostIE
+from .firsttv import FirstTVIE
 from .fktv import (
     FKTVIE,
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .fourtube import FourTubeIE
+from .franceinter import FranceInterIE
 from .francetv import (
     PluzzIE,
     FranceTvInfoIE,
     FranceTVIE,
-    GenerationQuoiIE
+    GenerationQuoiIE,
+    CultureboxIE,
 )
 from .freesound import FreesoundIE
+from .freespeech import FreespeechIE
 from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
 from .gamespot import GameSpotIE
@@ -72,31 +92,59 @@ from .generic import GenericIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .hark import HarkIE
+from .helsinki import HelsinkiIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
+from .huffpost import HuffPostIE
 from .hypem import HypemIE
 from .ign import IGNIE, OneUPIE
-from .imdb import ImdbIE
+from .imdb import (
+    ImdbIE,
+    ImdbListIE
+)
 from .ina import InaIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE
 from .internetvideoarchive import InternetVideoArchiveIE
+from .iprima import IPrimaIE
+from .ivi import (
+    IviIE,
+    IviCompilationIE
+)
+from .jadorecettepub import JadoreCettePubIE
 from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
+from .jpopsukitv import JpopsukiIE
 from .kankan import KankanIE
 from .keezmovies import KeezMoviesIE
+from .khanacademy import KhanAcademyIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
+from .kontrtube import KontrTubeIE
+from .la7 import LA7IE
+from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
 from .livestream import LivestreamIE, LivestreamOriginalIE
+from .lynda import (
+    LyndaIE,
+    LyndaCourseIE
+)
+from .m6 import M6IE
+from .macgamestore import MacGameStoreIE
+from .malemotion import MalemotionIE
 from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
+from .mpora import MporaIE
 from .mofosex import MofosexIE
-from .mtv import MTVIE
+from .mooshare import MooshareIE
+from .mtv import (
+    MTVIE,
+    MTVIggyIE,
+)
 from .muzu import MuzuTVIE
 from .myspace import MySpaceIE
 from .myspass import MySpassIE
@@ -104,11 +152,16 @@ from .myvideo import MyVideoIE
 from .naver import NaverIE
 from .nba import NBAIE
 from .nbc import NBCNewsIE
+from .ndr import NDRIE
 from .ndtv import NDTVIE
 from .newgrounds import NewgroundsIE
+from .nfb import NFBIE
 from .nhl import NHLIE, NHLVideocenterIE
 from .niconico import NiconicoIE
 from .ninegag import NineGagIE
+from .normalboots import NormalbootsIE
+from .novamov import NovamovIE
+from .nowness import NownessIE
 from .nowvideo import NowVideoIE
 from .ooyala import OoyalaIE
 from .orf import ORFIE
@@ -127,7 +180,13 @@ from .ro220 import Ro220IE
|
|||||||
from .rottentomatoes import RottenTomatoesIE
|
from .rottentomatoes import RottenTomatoesIE
|
||||||
from .roxwel import RoxwelIE
|
from .roxwel import RoxwelIE
|
||||||
from .rtlnow import RTLnowIE
|
from .rtlnow import RTLnowIE
|
||||||
from .rutube import RutubeIE
|
from .rutube import (
|
||||||
|
RutubeIE,
|
||||||
|
RutubeChannelIE,
|
||||||
|
RutubeMovieIE,
|
||||||
|
RutubePersonIE,
|
||||||
|
)
|
||||||
|
from .servingsys import ServingSysIE
|
||||||
from .sina import SinaIE
|
from .sina import SinaIE
|
||||||
from .slashdot import SlashdotIE
|
from .slashdot import SlashdotIE
|
||||||
from .slideshare import SlideshareIE
|
from .slideshare import SlideshareIE
|
||||||
@@ -146,17 +205,22 @@ from .southparkstudios import (
|
|||||||
from .space import SpaceIE
|
from .space import SpaceIE
|
||||||
from .spankwire import SpankwireIE
|
from .spankwire import SpankwireIE
|
||||||
from .spiegel import SpiegelIE
|
from .spiegel import SpiegelIE
|
||||||
|
from .spike import SpikeIE
|
||||||
from .stanfordoc import StanfordOpenClassroomIE
|
from .stanfordoc import StanfordOpenClassroomIE
|
||||||
from .statigram import StatigramIE
|
from .statigram import StatigramIE
|
||||||
from .steam import SteamIE
|
from .steam import SteamIE
|
||||||
from .streamcloud import StreamcloudIE
|
from .streamcloud import StreamcloudIE
|
||||||
|
from .streamcz import StreamCZIE
|
||||||
|
from .syfy import SyfyIE
|
||||||
from .sztvhu import SztvHuIE
|
from .sztvhu import SztvHuIE
|
||||||
from .teamcoco import TeamcocoIE
|
from .teamcoco import TeamcocoIE
|
||||||
from .techtalks import TechTalksIE
|
from .techtalks import TechTalksIE
|
||||||
from .ted import TEDIE
|
from .ted import TEDIE
|
||||||
|
from .testurl import TestURLIE
|
||||||
from .tf1 import TF1IE
|
from .tf1 import TF1IE
|
||||||
from .theplatform import ThePlatformIE
|
from .theplatform import ThePlatformIE
|
||||||
from .thisav import ThisAVIE
|
from .thisav import ThisAVIE
|
||||||
|
from .tinypic import TinyPicIE
|
||||||
from .toutv import TouTvIE
|
from .toutv import TouTvIE
|
||||||
from .traileraddict import TrailerAddictIE
|
from .traileraddict import TrailerAddictIE
|
||||||
from .trilulilu import TriluliluIE
|
from .trilulilu import TriluliluIE
|
||||||
@@ -170,6 +234,7 @@ from .ustream import UstreamIE, UstreamChannelIE
|
|||||||
from .vbox7 import Vbox7IE
|
from .vbox7 import Vbox7IE
|
||||||
from .veehd import VeeHDIE
|
from .veehd import VeeHDIE
|
||||||
from .veoh import VeohIE
|
from .veoh import VeohIE
|
||||||
|
from .vesti import VestiIE
|
||||||
from .vevo import VevoIE
|
from .vevo import VevoIE
|
||||||
from .vice import ViceIE
|
from .vice import ViceIE
|
||||||
from .viddler import ViddlerIE
|
from .viddler import ViddlerIE
|
||||||
@@ -182,12 +247,13 @@ from .vimeo import (
|
|||||||
VimeoUserIE,
|
VimeoUserIE,
|
||||||
VimeoAlbumIE,
|
VimeoAlbumIE,
|
||||||
VimeoGroupsIE,
|
VimeoGroupsIE,
|
||||||
|
VimeoReviewIE,
|
||||||
)
|
)
|
||||||
from .vine import VineIE
|
from .vine import VineIE
|
||||||
from .viki import VikiIE
|
from .viki import VikiIE
|
||||||
from .vk import VKIE
|
from .vk import VKIE
|
||||||
|
from .vube import VubeIE
|
||||||
from .wat import WatIE
|
from .wat import WatIE
|
||||||
from .websurg import WeBSurgIE
|
|
||||||
from .weibo import WeiboIE
|
from .weibo import WeiboIE
|
||||||
from .wimp import WimpIE
|
from .wimp import WimpIE
|
||||||
from .wistia import WistiaIE
|
from .wistia import WistiaIE
|
||||||
|
|||||||
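A sketch of how this import registry is consumed: gen_extractors(), defined in this same module, returns one instance of every imported extractor in registration order, and the first one whose suitable() accepts a URL handles it. The sample URL below is made up.

```python
# Hedged sketch: find which extractor claims a URL (sample URL invented).
from youtube_dl.extractor import gen_extractors

sample_url = 'http://www.dailymotion.com/video/x12345'
for ie in gen_extractors():
    if ie.suitable(sample_url):
        print(ie.IE_NAME)  # 'dailymotion'; the catch-all GenericIE comes later
        break
```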
youtube_dl/extractor/academicearth.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+from __future__ import unicode_literals
+import re
+
+from .common import InfoExtractor
+
+
+class AcademicEarthCourseIE(InfoExtractor):
+    _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)'
+    IE_NAME = 'AcademicEarth:Course'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        playlist_id = m.group('id')
+
+        webpage = self._download_webpage(url, playlist_id)
+        title = self._html_search_regex(
+            r'<h1 class="playlist-name">(.*?)</h1>', webpage, u'title')
+        description = self._html_search_regex(
+            r'<p class="excerpt">(.*?)</p>',
+            webpage, u'description', fatal=False)
+        urls = re.findall(
+            r'<h3 class="lecture-title"><a target="_blank" href="([^"]+)">',
+            webpage)
+        entries = [self.url_result(u) for u in urls]
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': title,
+            'description': description,
+            'entries': entries,
+        }
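The new AcademicEarthCourseIE is a pure playlist extractor: it scrapes lecture links and hands each back as a url_result stub for other extractors to resolve. A quick, self-contained way to sanity-check its _VALID_URL; both sample URLs are invented and only exercise the pattern.

```python
# Hedged sketch: exercise the _VALID_URL pattern with plain re.
import re

VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)'

for url in ('http://academicearth.org/playlists/laws-of-nature/',
            'https://www.academicearth.org/courses/intro-economics'):
    m = re.match(VALID_URL, url)
    print(m.group('id') if m else 'no match')
# laws-of-nature
# intro-economics
```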
youtube_dl/extractor/aparat.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+#coding: utf-8
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    HEADRequest,
+)
+
+
+class AparatIE(InfoExtractor):
+    _VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
+
+    _TEST = {
+        u'url': u'http://www.aparat.com/v/wP8On',
+        u'file': u'wP8On.mp4',
+        u'md5': u'6714e0af7e0d875c5a39c4dc4ab46ad1',
+        u'info_dict': {
+            u"title": u"تیم گلکسی 11 - زومیت",
+        },
+        #u'skip': u'Extremely unreliable',
+    }
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        # Note: There is an easier-to-parse configuration at
+        # http://www.aparat.com/video/video/config/videohash/%video_id
+        # but the URL in there does not work
+        embed_url = (u'http://www.aparat.com/video/video/embed/videohash/' +
+                     video_id + u'/vt/frame')
+        webpage = self._download_webpage(embed_url, video_id)
+
+        video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage)
+        for i, video_url in enumerate(video_urls):
+            req = HEADRequest(video_url)
+            res = self._request_webpage(
+                req, video_id, note=u'Testing video URL %d' % i, errnote=False)
+            if res:
+                break
+        else:
+            raise ExtractorError(u'No working video URLs found')
+
+        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
+        thumbnail = self._search_regex(
+            r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+            'ext': 'mp4',
+            'thumbnail': thumbnail,
+        }
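The probing loop above relies on Python's for/else: the else branch fires only when no break happened, i.e. when no candidate URL answered the HEAD request. The same pattern with plain urllib, as a hedged sketch (HEADRequest is youtube-dl's own helper; the candidate URLs here are fake).

```python
# Hedged sketch of the HEAD-probe loop; candidate URLs are invented.
try:
    from urllib.request import Request, urlopen  # Python 3
except ImportError:
    from urllib2 import Request, urlopen  # Python 2

candidates = ['http://example.com/a.mp4', 'http://example.com/b.mp4']
for video_url in candidates:
    req = Request(video_url)
    req.get_method = lambda: 'HEAD'  # same trick HEADRequest uses
    try:
        urlopen(req)
        break  # first working URL wins; video_url keeps its value
    except Exception:
        continue
else:
    raise Exception('No working video URLs found')
```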
youtube_dl/extractor/appletrailers.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -11,46 +13,46 @@ from ..utils import (
 class AppleTrailersIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
     _TEST = {
-        u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/",
-        u"playlist": [
+        "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
+        "playlist": [
             {
-                u"file": u"manofsteel-trailer4.mov",
-                u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8",
-                u"info_dict": {
-                    u"duration": 111,
-                    u"title": u"Trailer 4",
-                    u"upload_date": u"20130523",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-trailer4.mov",
+                "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
+                "info_dict": {
+                    "duration": 111,
+                    "title": "Trailer 4",
+                    "upload_date": "20130523",
+                    "uploader_id": "wb",
                 },
             },
             {
-                u"file": u"manofsteel-trailer3.mov",
-                u"md5": u"b8017b7131b721fb4e8d6f49e1df908c",
-                u"info_dict": {
-                    u"duration": 182,
-                    u"title": u"Trailer 3",
-                    u"upload_date": u"20130417",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-trailer3.mov",
+                "md5": "b8017b7131b721fb4e8d6f49e1df908c",
+                "info_dict": {
+                    "duration": 182,
+                    "title": "Trailer 3",
+                    "upload_date": "20130417",
+                    "uploader_id": "wb",
                 },
             },
             {
-                u"file": u"manofsteel-trailer.mov",
-                u"md5": u"d0f1e1150989b9924679b441f3404d48",
-                u"info_dict": {
-                    u"duration": 148,
-                    u"title": u"Trailer",
-                    u"upload_date": u"20121212",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-trailer.mov",
+                "md5": "d0f1e1150989b9924679b441f3404d48",
+                "info_dict": {
+                    "duration": 148,
+                    "title": "Trailer",
+                    "upload_date": "20121212",
+                    "uploader_id": "wb",
                 },
             },
             {
-                u"file": u"manofsteel-teaser.mov",
-                u"md5": u"5fe08795b943eb2e757fa95cb6def1cb",
-                u"info_dict": {
-                    u"duration": 93,
-                    u"title": u"Teaser",
-                    u"upload_date": u"20120721",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-teaser.mov",
+                "md5": "5fe08795b943eb2e757fa95cb6def1cb",
+                "info_dict": {
+                    "duration": 93,
+                    "title": "Teaser",
+                    "upload_date": "20120721",
+                    "uploader_id": "wb",
                 },
             }
         ]
@@ -110,7 +112,8 @@ class AppleTrailersIE(InfoExtractor):
                     'width': format['width'],
                     'height': int(format['height']),
                 })
-            formats = sorted(formats, key=lambda f: (f['height'], f['width']))
+
+            self._sort_formats(formats)
+
             playlist.append({
                 '_type': 'video',
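What the removed line did, in isolation: order formats worst-to-best by (height, width), the convention youtube-dl format lists follow; _sort_formats keeps that ordering but adds bitrate, codec and protocol tie-breakers. The sample dicts below are invented.

```python
# Hedged sketch of the replaced one-liner.
formats = [
    {'height': 720, 'width': 1280},
    {'height': 480, 'width': 854},
    {'height': 1080, 'width': 1920},
]
formats.sort(key=lambda f: (f['height'], f['width']))
print([f['height'] for f in formats])  # [480, 720, 1080]
```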
youtube_dl/extractor/archiveorg.py
@@ -1,9 +1,10 @@
+from __future__ import unicode_literals
+
 import json
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     unified_strdate,
 )
 
@@ -13,23 +14,22 @@ class ArchiveOrgIE(InfoExtractor):
     IE_DESC = 'archive.org videos'
     _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
     _TEST = {
-        u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
-        u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
-        u'md5': u'8af1d4cf447933ed3c7f4871162602db',
-        u'info_dict': {
-            u"title": u"1968 Demo - FJCC Conference Presentation Reel #1",
-            u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
-            u"upload_date": u"19681210",
-            u"uploader": u"SRI International"
+        "url": "http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
+        'file': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
+        'md5': '8af1d4cf447933ed3c7f4871162602db',
+        'info_dict': {
+            "title": "1968 Demo - FJCC Conference Presentation Reel #1",
+            "description": "Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
+            "upload_date": "19681210",
+            "uploader": "SRI International"
         }
     }
 
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        json_url = url + (u'?' if u'?' in url else '&') + u'output=json'
+        json_url = url + ('?' if '?' in url else '&') + 'output=json'
         json_data = self._download_webpage(json_url, video_id)
         data = json.loads(json_data)
 
@@ -38,16 +38,16 @@ class ArchiveOrgIE(InfoExtractor):
         uploader = data['metadata']['creator'][0]
         upload_date = unified_strdate(data['metadata']['date'][0])
 
-        formats = [{
+        formats = [
+            {
             'format': fdata['format'],
             'url': 'http://' + data['server'] + data['dir'] + fn,
             'file_size': int(fdata['size']),
         }
         for fn, fdata in data['files'].items()
         if 'Video' in fdata['format']]
-        formats.sort(key=lambda fdata: fdata['file_size'])
-        for f in formats:
-            f['ext'] = determine_ext(f['url'])
+        self._sort_formats(formats)
 
         return {
             '_type': 'video',
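The rewritten formats block filters archive.org's files mapping down to video entries; _sort_formats then replaces the manual sort by file size and the determine_ext loop. The comprehension on its own, with invented sample metadata mirroring the output=json shape:

```python
# Hedged sketch; metadata values are invented.
data = {
    'server': 'ia600401.us.archive.org',
    'dir': '/0/items/XD300-23',
    'files': {
        '/demo.ogv': {'format': 'Ogg Video', 'size': '7'},
        '/demo.gif': {'format': 'Animated GIF', 'size': '1'},
    },
}
formats = [
    {
        'format': fdata['format'],
        'url': 'http://' + data['server'] + data['dir'] + fn,
        'file_size': int(fdata['size']),
    }
    for fn, fdata in data['files'].items()
    if 'Video' in fdata['format']]
print(formats)  # only the .ogv entry survives the filter
```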
youtube_dl/extractor/ard.py
@@ -1,22 +1,28 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 from ..utils import (
+    determine_ext,
     ExtractorError,
 )
 
 
 class ARDIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
-    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
-    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
+    _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
     _TEST = {
-        u'url': u'http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640',
-        u'file': u'14077640.mp4',
-        u'md5': u'6ca8824255460c787376353f9e20bbd8',
-        u'info_dict': {
-            u"title": u"11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
+        'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',
+        'file': '19288786.mp4',
+        'md5': '515bf47ce209fb3f5a61b7aad364634c',
+        'info_dict': {
+            'title': 'Edward Snowden im Interview - Held oder Verräter?',
+            'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. Die Sendung vom Sonntagabend.',
+            'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',
         },
-        u'skip': u'Requires rtmpdump'
+        'skip': 'Blocked outside of Germany',
     }
 
     def _real_extract(self, url):
@@ -29,26 +35,49 @@ class ARDIE(InfoExtractor):
         else:
             video_id = m.group('video_id')
 
-        # determine title and media streams from webpage
-        html = self._download_webpage(url, video_id)
-        title = re.search(self._TITLE, html).group('title')
-        streams = [mo.groupdict() for mo in re.finditer(self._MEDIA_STREAM, html)]
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>', webpage, 'title')
+        description = self._html_search_meta(
+            'dcterms.abstract', webpage, 'description')
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        streams = [
+            mo.groupdict()
+            for mo in re.finditer(
+                r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)', webpage)]
         if not streams:
-            assert '"fsk"' in html
-            raise ExtractorError(u'This video is only available after 8:00 pm')
+            if '"fsk"' in webpage:
+                raise ExtractorError('This video is only available after 20:00')
 
-        # choose default media type and highest quality for now
-        stream = max([s for s in streams if int(s["media_type"]) == 0],
-                     key=lambda s: int(s["quality"]))
-
-        # there's two possibilities: RTMP stream or HTTP download
-        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
-        if stream['rtmp_url']:
-            self.to_screen(u'RTMP download detected')
-            assert stream['video_url'].startswith('mp4:')
-            info["url"] = stream["rtmp_url"]
-            info["play_path"] = stream['video_url']
-        else:
-            assert stream["video_url"].endswith('.mp4')
-            info["url"] = stream["video_url"]
-        return [info]
+        formats = []
+        for s in streams:
+            format = {
+                'quality': int(s['quality']),
+            }
+            if s.get('rtmp_url'):
+                format['protocol'] = 'rtmp'
+                format['url'] = s['rtmp_url']
+                format['playpath'] = s['video_url']
+            else:
+                format['url'] = s['video_url']
+
+            quality_name = self._search_regex(
+                r'[,.]([a-zA-Z0-9_-]+),?\.mp4', format['url'],
+                'quality name', default='NA')
+            format['format_id'] = '%s-%s-%s-%s' % (
+                determine_ext(format['url']), quality_name, s['media_type'],
+                s['quality'])
+
+            formats.append(format)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'formats': formats,
+            'thumbnail': thumbnail,
+        }
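The new per-stream loop derives a format_id from the container extension, a quality label scraped out of the URL, and ARD's media_type/quality numbers. The quality-label regex in isolation, with plain re and invented URLs (the extractor wraps this in _search_regex with default='NA'):

```python
# Hedged sketch of the quality-name extraction; sample URLs are invented.
import re

def quality_name(url):
    m = re.search(r'[,.]([a-zA-Z0-9_-]+),?\.mp4', url)
    return m.group(1) if m else 'NA'

print(quality_name('http://example.org/clip.hi.mp4'))  # hi
print(quality_name('http://example.org/stream.webm'))  # NA
```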
youtube_dl/extractor/arte.py
@@ -1,4 +1,6 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -22,7 +24,7 @@ class ArteTvIE(InfoExtractor):
     _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
     _LIVE_URL = r'index-[0-9]+\.html$'
 
-    IE_NAME = u'arte.tv'
+    IE_NAME = 'arte.tv'
 
     @classmethod
     def suitable(cls, url):
@@ -37,7 +39,7 @@ class ArteTvIE(InfoExtractor):
     #             r'src="(.*?/videothek_js.*?\.js)',
     #             0,
     #             [
-    #                 (1, 'url', u'Invalid URL: %s' % url)
+    #                 (1, 'url', 'Invalid URL: %s' % url)
     #             ]
     #         )
     #         http_host = url.split('/')[2]
@@ -49,12 +51,12 @@ class ArteTvIE(InfoExtractor):
     #             '(rtmp://.*?)\'',
     #             re.DOTALL,
     #             [
-    #                 (1, 'path', u'could not extract video path: %s' % url),
-    #                 (2, 'player', u'could not extract video player: %s' % url),
-    #                 (3, 'url', u'could not extract video url: %s' % url)
+    #                 (1, 'path', 'could not extract video path: %s' % url),
+    #                 (2, 'player', 'could not extract video player: %s' % url),
+    #                 (3, 'url', 'could not extract video url: %s' % url)
     #             ]
     #         )
-    #         video_url = u'%s/%s' % (info.get('url'), info.get('path'))
+    #         video_url = '%s/%s' % (info.get('url'), info.get('path'))
 
     def _real_extract(self, url):
         mobj = re.match(self._VIDEOS_URL, url)
@@ -107,9 +109,9 @@ class ArteTvIE(InfoExtractor):
     def _extract_liveweb(self, url, name, lang):
         """Extract form http://liveweb.arte.tv/"""
         webpage = self._download_webpage(url, name)
-        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
+        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id')
         config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
-                                        video_id, u'Downloading information')
+                                        video_id, 'Downloading information')
         event_doc = config_doc.find('event')
         url_node = event_doc.find('video').find('urlHd')
         if url_node is None:
@@ -124,7 +126,7 @@ class ArteTvIE(InfoExtractor):
 
 
 class ArteTVPlus7IE(InfoExtractor):
-    IE_NAME = u'arte.tv:+7'
+    IE_NAME = 'arte.tv:+7'
     _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
 
     @classmethod
@@ -207,7 +209,7 @@ class ArteTVPlus7IE(InfoExtractor):
         if bitrate is not None:
             quality += '-%d' % bitrate
         if format_info.get('versionCode') is not None:
-            format_id = u'%s-%s' % (quality, format_info['versionCode'])
+            format_id = '%s-%s' % (quality, format_info['versionCode'])
         else:
             format_id = quality
         info = {
@@ -216,7 +218,7 @@ class ArteTVPlus7IE(InfoExtractor):
             'width': format_info.get('width'),
             'height': height,
         }
-        if format_info['mediaType'] == u'rtmp':
+        if format_info['mediaType'] == 'rtmp':
             info['url'] = format_info['streamer']
             info['play_path'] = 'mp4:' + format_info['url']
             info['ext'] = 'flv'
@@ -231,27 +233,27 @@ class ArteTVPlus7IE(InfoExtractor):
 
 # It also uses the arte_vp_url url from the webpage to extract the information
 class ArteTVCreativeIE(ArteTVPlus7IE):
-    IE_NAME = u'arte.tv:creative'
+    IE_NAME = 'arte.tv:creative'
     _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
 
     _TEST = {
-        u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
-        u'file': u'050489-002.mp4',
-        u'info_dict': {
-            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
+        'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
+        'file': '050489-002.mp4',
+        'info_dict': {
+            'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design',
        },
     }
 
 
 class ArteTVFutureIE(ArteTVPlus7IE):
-    IE_NAME = u'arte.tv:future'
+    IE_NAME = 'arte.tv:future'
     _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
-        u'file': u'050940-003.mp4',
-        u'info_dict': {
-            u'title': u'Les champignons au secours de la planète',
+        'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
+        'file': '050940-003.mp4',
+        'info_dict': {
+            'title': 'Les champignons au secours de la planète',
        },
     }
 
@@ -263,23 +265,9 @@ class ArteTVFutureIE(ArteTVPlus7IE):
 
 
 class ArteTVDDCIE(ArteTVPlus7IE):
-    IE_NAME = u'arte.tv:ddc'
+    IE_NAME = 'arte.tv:ddc'
     _VALID_URL = r'http?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
 
-    _TEST = {
-        u'url': u'http://ddc.arte.tv/folge/neues-aus-mauretanien',
-        u'file': u'049881-009_PLUS7-D.flv',
-        u'info_dict': {
-            u'title': u'Mit offenen Karten',
-            u'description': u'md5:57929b0eaeddeb8a0c983f58e9ebd3b6',
-            u'upload_date': u'20131207',
-        },
-        u'params': {
-            # rtmp download
-            u'skip_download': True,
-        },
-    }
-
     def _real_extract(self, url):
         video_id, lang = self._extract_url_info(url)
         if lang == 'folge':
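Beyond dropping u'' prefixes, the hunks above touch the format_id logic, which is easiest to see whole: a quality label, then an optional bitrate, then an optional version code. A sketch with invented values:

```python
# Hedged sketch of the format_id scheme; values are invented.
def make_format_id(quality, bitrate=None, version_code=None):
    if bitrate is not None:
        quality += '-%d' % bitrate
    if version_code is not None:
        return '%s-%s' % (quality, version_code)
    return quality

print(make_format_id('HQ', 800, 'VF-STA'))  # HQ-800-VF-STA
print(make_format_id('SQ'))                 # SQ
```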
youtube_dl/extractor/auengine.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -7,13 +9,14 @@ from ..utils import (
     ExtractorError,
 )
 
+
 class AUEngineIE(InfoExtractor):
     _TEST = {
-        u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
-        u'file': u'lfvlytY6.mp4',
-        u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f',
-        u'info_dict': {
-            u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]"
+        'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
+        'file': 'lfvlytY6.mp4',
+        'md5': '48972bdbcf1a3a2f5533e62425b41d4f',
+        'info_dict': {
+            'title': '[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]'
         }
     }
     _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed\.php\?.*?file=([^&]+).*?'
@@ -23,7 +26,7 @@ class AUEngineIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
-                webpage, u'title')
+                webpage, 'title')
         title = title.strip()
         links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
         links = map(compat_urllib_parse.unquote, links)
@@ -37,7 +40,7 @@ class AUEngineIE(InfoExtractor):
                 video_url = link
         if not video_url:
             raise ExtractorError(u'Could not find video URL')
-        ext = u'.' + determine_ext(video_url)
+        ext = '.' + determine_ext(video_url)
         if ext == title[-len(ext):]:
             title = title[:-len(ext)]
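The title clean-up at the end of that hunk strips a trailing file extension from the page title when it matches the detected one. The same logic standalone; the real code uses youtube-dl's determine_ext(), while this sketch substitutes a plain rpartition and invented values.

```python
# Hedged sketch of the trailing-extension strip.
def strip_trailing_ext(title, video_url):
    ext = '.' + video_url.rpartition('.')[2]
    if title.endswith(ext):
        title = title[:-len(ext)]
    return title

print(strip_trailing_ext('Some Episode.mp4', 'http://host/x.mp4'))  # Some Episode
print(strip_trailing_ext('Some Episode', 'http://host/x.mp4'))      # Some Episode
```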
youtube_dl/extractor/bambuser.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 import itertools
@@ -9,26 +11,26 @@ from ..utils import (
 
 
 class BambuserIE(InfoExtractor):
-    IE_NAME = u'bambuser'
+    IE_NAME = 'bambuser'
     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
     _API_KEY = '005f64509e19a868399060af746a00aa'
 
     _TEST = {
-        u'url': u'http://bambuser.com/v/4050584',
+        'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',
-        u'info_dict': {
-            u'id': u'4050584',
-            u'ext': u'flv',
-            u'title': u'Education engineering days - lightning talks',
-            u'duration': 3741,
-            u'uploader': u'pixelversity',
-            u'uploader_id': u'344706',
+        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        'info_dict': {
+            'id': '4050584',
+            'ext': 'flv',
+            'title': 'Education engineering days - lightning talks',
+            'duration': 3741,
+            'uploader': 'pixelversity',
+            'uploader_id': '344706',
         },
-        u'params': {
+        'params': {
             # It doesn't respect the 'Range' header, it would download the whole video
             # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
-            u'skip_download': True,
+            'skip_download': True,
         },
     }
 
@@ -53,7 +55,7 @@ class BambuserIE(InfoExtractor):
 
 
 class BambuserChannelIE(InfoExtractor):
-    IE_NAME = u'bambuser:channel'
+    IE_NAME = 'bambuser:channel'
     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
@@ -72,7 +74,7 @@ class BambuserChannelIE(InfoExtractor):
         # Without setting this header, we wouldn't get any result
         req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
         info_json = self._download_webpage(req, user,
-            u'Downloading page %d' % i)
+            'Downloading page %d' % i)
         results = json.loads(info_json)['result']
         if len(results) == 0:
             break
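BambuserChannelIE pages through the API _STEP results at a time until an empty page comes back. The paging skeleton on its own, with a hypothetical fetch_page standing in for the signed API request the extractor builds:

```python
# Hedged sketch of the channel-paging loop; fetch_page and its data are invented.
import itertools

STEP = 50

def fetch_page(start):
    data = list(range(120))  # pretend the channel has 120 videos
    return data[start:start + STEP]

results = []
for i in itertools.count(1):
    page = fetch_page((i - 1) * STEP)
    if not page:
        break
    results.extend(page)
print(len(results))  # 120
```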
youtube_dl/extractor/bandcamp.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import json
 import re
 
@@ -10,16 +12,16 @@ from ..utils import (
 
 
 class BandcampIE(InfoExtractor):
-    IE_NAME = u'Bandcamp'
     _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
     _TESTS = [{
-        u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
-        u'file': u'1812978515.mp3',
-        u'md5': u'cdeb30cdae1921719a3cbcab696ef53c',
-        u'info_dict': {
-            u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad"
+        'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
+        'file': '1812978515.mp3',
+        'md5': 'c557841d5e50261777a6585648adf439',
+        'info_dict': {
+            "title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
+            "duration": 10,
         },
-        u'skip': u'There is a limit of 200 free downloads / month for the test song'
+        '_skip': 'There is a limit of 200 free downloads / month for the test song'
     }]
 
     def _real_extract(self, url):
@@ -33,82 +35,95 @@ class BandcampIE(InfoExtractor):
         if m_trackinfo:
             json_code = m_trackinfo.group(1)
             data = json.loads(json_code)
-            for d in data:
-                formats = [{
-                    'format_id': 'format_id',
-                    'url': format_url,
-                    'ext': format_id.partition('-')[0]
-                } for format_id, format_url in sorted(d['file'].items())]
+            d = data[0]
+
+            duration = int(round(d['duration']))
+            formats = []
+            for format_id, format_url in d['file'].items():
+                ext, _, abr_str = format_id.partition('-')
+
+                formats.append({
+                    'format_id': format_id,
+                    'url': format_url,
+                    'ext': format_id.partition('-')[0],
+                    'vcodec': 'none',
+                    'acodec': format_id.partition('-')[0],
+                    'abr': int(format_id.partition('-')[2]),
+                })
+
+            self._sort_formats(formats)
+
             return {
                 'id': compat_str(d['id']),
                 'title': d['title'],
                 'formats': formats,
+                'duration': duration,
             }
         else:
-            raise ExtractorError(u'No free songs found')
+            raise ExtractorError('No free songs found')
 
         download_link = m_download.group(1)
-        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
+        video_id = re.search(
+            r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
             webpage, re.MULTILINE | re.DOTALL).group('id')
 
-        download_webpage = self._download_webpage(download_link, id,
+        download_webpage = self._download_webpage(download_link, video_id,
             'Downloading free downloads page')
         # We get the dictionary of the track from some javascrip code
        info = re.search(r'items: (.*?),$',
            download_webpage, re.MULTILINE).group(1)
        info = json.loads(info)[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
-        mp3_info = info[u'downloads'][u'mp3-320']
+        mp3_info = info['downloads']['mp3-320']
        # If we try to use this url it says the link has expired
-        initial_url = mp3_info[u'url']
+        initial_url = mp3_info['url']
        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
        m_url = re.match(re_url, initial_url)
        #We build the url we will use to get the final track url
        # This url is build in Bandcamp in the script download_bunde_*.js
-        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
+        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
-        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
+        final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
        # If we could correctly generate the .rand field the url would be
        #in the "download_url" key
        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
-        track_info = {'id':id,
-            'title' : info[u'title'],
+        return {
+            'id': video_id,
+            'title': info['title'],
             'ext': 'mp3',
+            'vcodec': 'none',
             'url': final_url,
-            'thumbnail' : info[u'thumb_url'],
-            'uploader' : info[u'artist']
+            'thumbnail': info.get('thumb_url'),
+            'uploader': info.get('artist'),
         }
-
-        return [track_info]
 
 
 class BandcampAlbumIE(InfoExtractor):
-    IE_NAME = u'Bandcamp:album'
+    IE_NAME = 'Bandcamp:album'
     _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)'
 
     _TEST = {
-        u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
-        u'playlist': [
+        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
+        'playlist': [
             {
-                u'file': u'1353101989.mp3',
-                u'md5': u'39bc1eded3476e927c724321ddf116cf',
-                u'info_dict': {
-                    u'title': u'Intro',
+                'file': '1353101989.mp3',
+                'md5': '39bc1eded3476e927c724321ddf116cf',
+                'info_dict': {
+                    'title': 'Intro',
                 }
             },
             {
-                u'file': u'38097443.mp3',
-                u'md5': u'1a2c32e2691474643e912cc6cd4bffaa',
-                u'info_dict': {
-                    u'title': u'Kero One - Keep It Alive (Blazo remix)',
+                'file': '38097443.mp3',
+                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
+                'info_dict': {
+                    'title': 'Kero One - Keep It Alive (Blazo remix)',
                 }
             },
         ],
-        u'params': {
-            u'playlistend': 2
+        'params': {
+            'playlistend': 2
         },
-        u'skip': u'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
+        'skip': 'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
     }
 
     def _real_extract(self, url):
@@ -117,11 +132,11 @@ class BandcampAlbumIE(InfoExtractor):
         webpage = self._download_webpage(url, title)
         tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
         if not tracks_paths:
-            raise ExtractorError(u'The page doesn\'t contain any track')
+            raise ExtractorError('The page doesn\'t contain any tracks')
         entries = [
             self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
             for t_path in tracks_paths]
-        title = self._search_regex(r'album_title : "(.*?)"', webpage, u'title')
+        title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title')
         return {
             '_type': 'playlist',
             'title': title,
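The new per-format loop reads Bandcamp's file keys, e.g. 'mp3-128', which encode the codec and the average bitrate on either side of the dash. The parsing step in isolation, with an invented mapping:

```python
# Hedged sketch of the format_id parsing; the files mapping is invented.
files = {
    'mp3-128': 'http://example.com/track.mp3?low',
    'mp3-320': 'http://example.com/track.mp3?high',
}
formats = []
for format_id, format_url in files.items():
    ext, _, abr_str = format_id.partition('-')
    formats.append({
        'format_id': format_id,
        'url': format_url,
        'ext': ext,
        'vcodec': 'none',
        'acodec': ext,
        'abr': int(abr_str),
    })
print(sorted(f['abr'] for f in formats))  # [128, 320]
```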
youtube_dl/extractor/bbccouk.py (new file, 218 lines)
@@ -0,0 +1,218 @@
+from __future__ import unicode_literals
+
+import re
+
+from .subtitles import SubtitlesInfoExtractor
+from ..utils import ExtractorError
+
+
+class BBCCoUkIE(SubtitlesInfoExtractor):
+    IE_NAME = 'bbc.co.uk'
+    IE_DESC = 'BBC iPlayer'
+    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:programmes|iplayer/episode)/(?P<id>[\da-z]{8})'
+
+    _TESTS = [
+        {
+            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
+            'info_dict': {
+                'id': 'b039d07m',
+                'ext': 'flv',
+                'title': 'Kaleidoscope: Leonard Cohen',
+                'description': 'md5:db4755d7a665ae72343779f7dacb402c',
+                'duration': 1740,
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            }
+        },
+        {
+            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
+            'info_dict': {
+                'id': 'b00yng1d',
+                'ext': 'flv',
+                'title': 'The Man in Black: Series 3: The Printed Name',
+                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
+                'duration': 1800,
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+            'skip': 'Episode is no longer available on BBC iPlayer Radio',
+        },
+        {
+            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
+            'info_dict': {
+                'id': 'b00yng1d',
+                'ext': 'flv',
+                'title': 'The Voice UK: Series 3: Blind Auditions 5',
+                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+                'duration': 5100,
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+        }
+    ]
+
+    def _extract_asx_playlist(self, connection, programme_id):
+        asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
+        return [ref.get('href') for ref in asx.findall('./Entry/ref')]
+
+    def _extract_connection(self, connection, programme_id):
+        formats = []
+        protocol = connection.get('protocol')
+        supplier = connection.get('supplier')
+        if protocol == 'http':
+            href = connection.get('href')
+            # ASX playlist
+            if supplier == 'asx':
+                for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
+                    formats.append({
+                        'url': ref,
+                        'format_id': 'ref%s_%s' % (i, supplier),
+                    })
+            # Direct link
+            else:
+                formats.append({
+                    'url': href,
+                    'format_id': supplier,
+                })
+        elif protocol == 'rtmp':
+            application = connection.get('application', 'ondemand')
+            auth_string = connection.get('authString')
+            identifier = connection.get('identifier')
+            server = connection.get('server')
+            formats.append({
+                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
+                'play_path': identifier,
+                'app': '%s?%s' % (application, auth_string),
+                'page_url': 'http://www.bbc.co.uk',
+                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
+                'rtmp_live': False,
+                'ext': 'flv',
+                'format_id': supplier,
+            })
+        return formats
+
+    def _extract_items(self, playlist):
+        return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
+
+    def _extract_medias(self, media_selection):
+        return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
+
+    def _extract_connections(self, media):
+        return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
+
+    def _extract_video(self, media, programme_id):
+        formats = []
+        vbr = int(media.get('bitrate'))
+        vcodec = media.get('encoding')
+        service = media.get('service')
+        width = int(media.get('width'))
+        height = int(media.get('height'))
+        file_size = int(media.get('media_file_size'))
+        for connection in self._extract_connections(media):
+            conn_formats = self._extract_connection(connection, programme_id)
+            for format in conn_formats:
+                format.update({
+                    'format_id': '%s_%s' % (service, format['format_id']),
+                    'width': width,
+                    'height': height,
+                    'vbr': vbr,
+                    'vcodec': vcodec,
+                    'filesize': file_size,
+                })
+            formats.extend(conn_formats)
+        return formats
+
+    def _extract_audio(self, media, programme_id):
+        formats = []
+        abr = int(media.get('bitrate'))
+        acodec = media.get('encoding')
+        service = media.get('service')
+        for connection in self._extract_connections(media):
+            conn_formats = self._extract_connection(connection, programme_id)
+            for format in conn_formats:
+                format.update({
+                    'format_id': '%s_%s' % (service, format['format_id']),
+                    'abr': abr,
+                    'acodec': acodec,
+                })
+            formats.extend(conn_formats)
+        return formats
+
+    def _extract_captions(self, media, programme_id):
+        subtitles = {}
+        for connection in self._extract_connections(media):
+            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
+            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
+            ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
+            srt = ''
+            for pos, p in enumerate(ps):
+                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'),
+                                                          p.text.strip() if p.text is not None else '')
+            subtitles[lang] = srt
+        return subtitles
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        group_id = mobj.group('id')
+
+        playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
+                                      'Downloading playlist XML')
+
+        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
+        if no_items is not None:
+            reason = no_items.get('reason')
+            if reason == 'preAvailability':
+                msg = 'Episode %s is not yet available' % group_id
+            elif reason == 'postAvailability':
+                msg = 'Episode %s is no longer available' % group_id
+            else:
+                msg = 'Episode %s is not available: %s' % (group_id, reason)
+            raise ExtractorError(msg, expected=True)
+
+        formats = []
+        subtitles = None
+
+        for item in self._extract_items(playlist):
+            kind = item.get('kind')
+            if kind != 'programme' and kind != 'radioProgramme':
+                continue
+            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
+            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+
+            programme_id = item.get('identifier')
+            duration = int(item.get('duration'))
+
+            media_selection = self._download_xml(
+                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
+                programme_id, 'Downloading media selection XML')
+
+            for media in self._extract_medias(media_selection):
+                kind = media.get('kind')
+                if kind == 'audio':
+                    formats.extend(self._extract_audio(media, programme_id))
+                elif kind == 'video':
+                    formats.extend(self._extract_video(media, programme_id))
+                elif kind == 'captions':
+                    subtitles = self._extract_captions(media, programme_id)
+
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(programme_id, subtitles)
+            return
+
+        self._sort_formats(formats)
+
+        return {
+            'id': programme_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'formats': formats,
+            'subtitles': subtitles,
+        }
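_extract_captions flattens BBC's TTML paragraphs into one SRT-like string per language: counter, time range, text, blank line. The string-building step alone, with invented cues (note the extractor keeps a zero-based counter and the timestamps as-is):

```python
# Hedged sketch of the caption flattening; cue data is invented.
cues = [
    ('00:00:01.000', '00:00:03.000', 'Hello'),
    ('00:00:04.000', '00:00:06.000', 'World'),
]
srt = ''
for pos, (begin, end, text) in enumerate(cues):
    srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), begin, end, text)
print(srt)
```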
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import datetime
 import json
 import re
@@ -9,20 +11,20 @@ from ..utils import (


 class BlinkxIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/ce/|blinkx:)(?P<id>[^?]+)'
-    _IE_NAME = u'blinkx'
+    _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
+    IE_NAME = 'blinkx'

     _TEST = {
-        u'url': u'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
-        u'file': u'8aQUy7GV.mp4',
-        u'md5': u'2e9a07364af40163a908edbf10bb2492',
-        u'info_dict': {
-            u"title": u"Police Car Rolls Away",
-            u"uploader": u"stupidvideos.com",
-            u"upload_date": u"20131215",
-            u"description": u"A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
-            u"duration": 14.886,
-            u"thumbnails": [{
+        'url': 'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
+        'file': '8aQUy7GV.mp4',
+        'md5': '2e9a07364af40163a908edbf10bb2492',
+        'info_dict': {
+            "title": "Police Car Rolls Away",
+            "uploader": "stupidvideos.com",
+            "upload_date": "20131215",
+            "description": "A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
+            "duration": 14.886,
+            "thumbnails": [{
             "width": 100,
             "height": 76,
             "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg",
@@ -30,17 +32,17 @@ class BlinkxIE(InfoExtractor):
         },
     }

     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
         video_id = m.group('id')
         display_id = video_id[:8]

         api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' +
-                   u'video=%s' % video_id)
+                   'video=%s' % video_id)
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
         dt = datetime.datetime.fromtimestamp(data['pubdate_epoch'])
         upload_date = dt.strftime('%Y%m%d')

         duration = None
         thumbnails = []
@@ -54,12 +56,17 @@ class BlinkxIE(InfoExtractor):
             })
         elif m['type'] == 'original':
             duration = m['d']
+        elif m['type'] == 'youtube':
+            yt_id = m['link']
+            self.to_screen(u'Youtube video detected: %s' % yt_id)
+            return self.url_result(yt_id, 'Youtube', video_id=yt_id)
         elif m['type'] in ('flv', 'mp4'):
             vcodec = remove_start(m['vcodec'], 'ff')
             acodec = remove_start(m['acodec'], 'ff')
+            tbr = (int(m['vbr']) + int(m['abr'])) // 1000
             format_id = (u'%s-%sk-%s' %
                          (vcodec,
-                          (int(m['vbr']) + int(m['abr'])) // 1000,
+                          tbr,
                           m['w']))
             formats.append({
                 'format_id': format_id,
@@ -68,10 +75,12 @@ class BlinkxIE(InfoExtractor):
                 'acodec': acodec,
                 'abr': int(m['abr']) // 1000,
                 'vbr': int(m['vbr']) // 1000,
+                'tbr': tbr,
                 'width': int(m['w']),
                 'height': int(m['h']),
             })
-        formats.sort(key=lambda f: (f['width'], f['vbr'], f['abr']))
+
+        self._sort_formats(formats)

         return {
             'id': display_id,
@@ -79,7 +88,7 @@ class BlinkxIE(InfoExtractor):
             'title': data['title'],
             'formats': formats,
             'uploader': data['channel_name'],
             'upload_date': upload_date,
             'description': data.get('description'),
             'thumbnails': thumbnails,
             'duration': duration,
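Note: the blinkx change above hoists the combined bitrate into a tbr variable so the same number feeds both the format_id string and the new tbr field. The arithmetic, sketched with made-up API values:

    # the service reports bitrates in bit/s; youtube-dl fields use kbit/s
    m = {'vbr': '896000', 'abr': '128000', 'w': 480}
    tbr = (int(m['vbr']) + int(m['abr'])) // 1000    # -> 1024
    format_id = '%s-%sk-%s' % ('h264', tbr, m['w'])  # -> 'h264-1024k-480'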
@@ -1,157 +1,145 @@
+from __future__ import unicode_literals
+
 import datetime
-import json
-import os
 import re
-import socket

 from .common import InfoExtractor
+from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
-    compat_http_client,
-    compat_parse_qs,
     compat_str,
-    compat_urllib_error,
-    compat_urllib_parse_urlparse,
     compat_urllib_request,
-    ExtractorError,
     unescapeHTML,
 )


-class BlipTVIE(InfoExtractor):
+class BlipTVIE(SubtitlesInfoExtractor):
     """Information extractor for blip.tv"""

-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
-    _URL_EXT = r'^.*\.([a-z0-9]+)$'
-    IE_NAME = u'blip.tv'
-    _TEST = {
-        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
-        u'file': u'5779306.m4v',
-        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
-        u'info_dict': {
-            u"upload_date": u"20111205",
-            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
-            u"uploader": u"Comic Book Resources - CBR TV",
-            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
-        }
-    }
-
-    def report_direct_download(self, title):
-        """Report information extraction."""
-        self.to_screen(u'%s: Direct download detected' % title)
+    _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$'
+
+    _TESTS = [{
+        'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
+        'md5': 'c6934ad0b6acf2bd920720ec888eb812',
+        'info_dict': {
+            'id': '5779306',
+            'ext': 'mov',
+            'upload_date': '20111205',
+            'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
+            'uploader': 'Comic Book Resources - CBR TV',
+            'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
+        }
+    }, {
+        # https://github.com/rg3/youtube-dl/pull/2274
+        'note': 'Video with subtitles',
+        'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
+        'md5': '309f9d25b820b086ca163ffac8031806',
+        'info_dict': {
+            'id': '6586561',
+            'ext': 'mp4',
+            'uploader': 'Red vs. Blue',
+            'description': 'One-Zero-One',
+            'upload_date': '20130614',
+            'title': 'Red vs. Blue Season 11 Episode 1',
+        }
+    }]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+        presumptive_id = mobj.group('presumptive_id')

         # See https://github.com/rg3/youtube-dl/issues/857
-        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
-        if api_mobj is not None:
-            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
-        urlp = compat_urllib_parse_urlparse(url)
-        if urlp.path.startswith('/play/'):
-            response = self._request_webpage(url, None, False)
-            redirecturl = response.geturl()
-            rurlp = compat_urllib_parse_urlparse(redirecturl)
-            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
-            url = 'http://blip.tv/a/a-' + file_id
-            return self._real_extract(url)
-
-        if '?' in url:
-            cchar = '&'
-        else:
-            cchar = '?'
+        embed_mobj = re.match(r'https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url)
+        if embed_mobj:
+            info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1)
+            info_page = self._download_webpage(info_url, embed_mobj.group(1))
+            video_id = self._search_regex(
+                r'data-episode-id="([0-9]+)', info_page, 'video_id')
+            return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV')
+
+        cchar = '&' if '?' in url else '?'
         json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
         request = compat_urllib_request.Request(json_url)
         request.add_header('User-Agent', 'iTunes/10.6.1')
-        self.report_extraction(mobj.group(1))
-        info = None
-        urlh = self._request_webpage(request, None, False,
-            u'unable to download video info webpage')
-        if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
-            basename = url.split('/')[-1]
-            title,ext = os.path.splitext(basename)
-            title = title.decode('UTF-8')
-            ext = ext.replace('.', '')
-            self.report_direct_download(title)
-            info = {
-                'id': title,
-                'url': url,
-                'uploader': None,
-                'upload_date': None,
-                'title': title,
-                'ext': ext,
-                'urlhandle': urlh
-            }
-        if info is None: # Regular URL
-            try:
-                json_code_bytes = urlh.read()
-                json_code = json_code_bytes.decode('utf-8')
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
-
-            try:
-                json_data = json.loads(json_code)
-                if 'Post' in json_data:
-                    data = json_data['Post']
-                else:
-                    data = json_data
-
-                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-                if 'additionalMedia' in data:
-                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
-                    best_format = formats[-1]
-                    video_url = best_format['url']
-                else:
-                    video_url = data['media']['url']
-                    umobj = re.match(self._URL_EXT, video_url)
-                    if umobj is None:
-                        raise ValueError('Can not determine filename extension')
-                    ext = umobj.group(1)
-
-                info = {
-                    'id': compat_str(data['item_id']),
-                    'url': video_url,
-                    'uploader': data['display_name'],
-                    'upload_date': upload_date,
-                    'title': data['title'],
-                    'ext': ext,
-                    'format': data['media']['mimeType'],
-                    'thumbnail': data['thumbnailUrl'],
-                    'description': data['description'],
-                    'player_url': data['embedUrl'],
-                    'user_agent': 'iTunes/10.6.1',
-                }
-            except (ValueError,KeyError) as err:
-                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
-
-        return [info]
+
+        json_data = self._download_json(request, video_id=presumptive_id)
+        if 'Post' in json_data:
+            data = json_data['Post']
+        else:
+            data = json_data
+
+        video_id = compat_str(data['item_id'])
+        upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+        subtitles = {}
+        formats = []
+        if 'additionalMedia' in data:
+            for f in data['additionalMedia']:
+                if f.get('file_type_srt') == 1:
+                    LANGS = {
+                        'english': 'en',
+                    }
+                    lang = f['role'].rpartition('-')[-1].strip().lower()
+                    langcode = LANGS.get(lang, lang)
+                    subtitles[langcode] = f['url']
+                    continue
+                if not int(f['media_width']): # filter m3u8
+                    continue
+                formats.append({
+                    'url': f['url'],
+                    'format_id': f['role'],
+                    'width': int(f['media_width']),
+                    'height': int(f['media_height']),
+                })
+        else:
+            formats.append({
+                'url': data['media']['url'],
+                'width': int(data['media']['width']),
+                'height': int(data['media']['height']),
+            })
+        self._sort_formats(formats)
+
+        # subtitles
+        video_subtitles = self.extract_subtitles(video_id, subtitles)
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(video_id, subtitles)
+            return
+
+        return {
+            'id': video_id,
+            'uploader': data['display_name'],
+            'upload_date': upload_date,
+            'title': data['title'],
+            'thumbnail': data['thumbnailUrl'],
+            'description': data['description'],
+            'user_agent': 'iTunes/10.6.1',
+            'formats': formats,
+            'subtitles': video_subtitles,
+        }
+
+    def _download_subtitle_url(self, sub_lang, url):
+        # For some weird reason, blip.tv serves a video instead of subtitles
+        # when we request with a common UA
+        req = compat_urllib_request.Request(url)
+        req.add_header('Youtubedl-user-agent', 'youtube-dl')
+        return self._download_webpage(req, None, note=False)


 class BlipTVUserIE(InfoExtractor):
-    """Information Extractor for blip.tv users."""
-
     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
     _PAGE_SIZE = 12
-    IE_NAME = u'blip.tv:user'
+    IE_NAME = 'blip.tv:user'

     def _real_extract(self, url):
-        # Extract username
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)

         username = mobj.group(1)

         page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

-        page = self._download_webpage(url, username, u'Downloading user page')
+        page = self._download_webpage(url, username, 'Downloading user page')
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)

         # Download video ids using BlipTV Ajax calls. Result size per
         # query is limited (currently to 12 videos) so we need to query
         # page by page until there are no video ids - it means we got
@@ -162,8 +150,8 @@ class BlipTVUserIE(InfoExtractor):

         while True:
             url = page_base + "&page=" + str(pagenum)
-            page = self._download_webpage(url, username,
-                u'Downloading video ids from page %d' % pagenum)
+            page = self._download_webpage(
+                url, username, 'Downloading video ids from page %d' % pagenum)

             # Extract video identifiers
             ids_in_page = []
@@ -185,6 +173,6 @@ class BlipTVUserIE(InfoExtractor):

             pagenum += 1

-        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+        urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
         return [self.playlist_result(url_entries, playlist_title=username)]
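Note: the blip.tv subtitle support added above derives a language code from the media role string. A self-contained sketch of that mapping; the sample role values are assumptions, not captured API output:

    LANGS = {
        'english': 'en',
    }

    def langcode_from_role(role):
        # take the part after the last dash and look it up, falling back
        # to the raw (lowercased) value
        lang = role.rpartition('-')[-1].strip().lower()
        return LANGS.get(lang, lang)

    assert langcode_from_role('Subtitles-English') == 'en'
    assert langcode_from_role('Subtitles-Esperanto') == 'esperanto'  # fallback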
@@ -1,6 +1,7 @@
 import re

 from .common import InfoExtractor
+from .ooyala import OoyalaIE


 class BloombergIE(InfoExtractor):
@@ -23,5 +24,7 @@ class BloombergIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         webpage = self._download_webpage(url, name)
-        ooyala_url = self._og_search_video_url(webpage)
-        return self.url_result(ooyala_url, ie='Ooyala')
+        embed_code = self._search_regex(
+            r'<source src="https?://[^/]+/[^/]+/[^/]+/([^/]+)', webpage,
+            'embed code')
+        return OoyalaIE._build_url_result(embed_code)
@@ -1,18 +1,20 @@
+from __future__ import unicode_literals
+
 import re
 import json

 from .common import InfoExtractor
-from ..utils import determine_ext


 class BreakIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
+    _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
     _TEST = {
-        u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
-        u'file': u'2468056.mp4',
-        u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
-        u'info_dict': {
-            u"title": u"When Girls Act Like D-Bags"
+        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
+        'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
+        'info_dict': {
+            'id': '2468056',
+            'ext': 'mp4',
+            'title': 'When Girls Act Like D-Bags',
         }
     }

@@ -22,17 +24,16 @@ class BreakIE(InfoExtractor):
         embed_url = 'http://www.break.com/embed/%s' % video_id
         webpage = self._download_webpage(embed_url, video_id)
         info_json = self._search_regex(r'var embedVars = ({.*?});', webpage,
-                                       u'info json', flags=re.DOTALL)
+                                       'info json', flags=re.DOTALL)
         info = json.loads(info_json)
         video_url = info['videoUri']
         m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url)
         if m_youtube is not None:
             return self.url_result(m_youtube.group(1), 'Youtube')
         final_url = video_url + '?' + info['AuthToken']
-        return [{
+        return {
             'id': video_id,
             'url': final_url,
-            'ext': determine_ext(final_url),
             'title': info['contentName'],
             'thumbnail': info['thumbUri'],
-        }]
+        }
@@ -1,4 +1,5 @@
 # encoding: utf-8
+from __future__ import unicode_literals

 import re
 import json
@@ -8,65 +9,68 @@ from .common import InfoExtractor
 from ..utils import (
     compat_urllib_parse,
     find_xpath_attr,
+    fix_xml_ampersands,
     compat_urlparse,
     compat_str,
     compat_urllib_request,
+    compat_parse_qs,

     ExtractorError,
+    unsmuggle_url,
+    unescapeHTML,
 )


 class BrightcoveIE(InfoExtractor):
     _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
     _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
-    _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s'

     _TESTS = [
         {
             # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
-            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
-            u'file': u'2371591881001.mp4',
-            u'md5': u'8eccab865181d29ec2958f32a6a754f5',
-            u'note': u'Test Brightcove downloads and detection in GenericIE',
-            u'info_dict': {
-                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
-                u'uploader': u'8TV',
-                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
+            'file': '2371591881001.mp4',
+            'md5': '5423e113865d26e40624dce2e4b45d95',
+            'note': 'Test Brightcove downloads and detection in GenericIE',
+            'info_dict': {
+                'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
+                'uploader': '8TV',
+                'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
             }
         },
         {
             # From http://medianetwork.oracle.com/video/player/1785452137001
-            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
-            u'file': u'1785452137001.flv',
-            u'info_dict': {
-                u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
-                u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.',
-                u'uploader': u'Oracle',
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
+            'file': '1785452137001.flv',
+            'info_dict': {
+                'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
+                'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
+                'uploader': 'Oracle',
             },
         },
         {
             # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
-            u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
-            u'info_dict': {
-                u'id': u'2750934548001',
-                u'ext': u'mp4',
-                u'title': u'This Bracelet Acts as a Personal Thermostat',
-                u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0',
-                u'uploader': u'Mashable',
+            'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
+            'info_dict': {
+                'id': '2750934548001',
+                'ext': 'mp4',
+                'title': 'This Bracelet Acts as a Personal Thermostat',
+                'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
+                'uploader': 'Mashable',
             },
         },
         {
             # test that the default referer works
             # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
-            u'url': u'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
-            u'info_dict': {
-                u'id': u'2878862109001',
-                u'ext': u'mp4',
-                u'title': u'Lost in Motion II',
-                u'description': u'md5:363109c02998fee92ec02211bd8000df',
-                u'uploader': u'National Ballet of Canada',
+            'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
+            'info_dict': {
+                'id': '2878862109001',
+                'ext': 'mp4',
+                'title': 'Lost in Motion II',
+                'description': 'md5:363109c02998fee92ec02211bd8000df',
+                'uploader': 'National Ballet of Canada',
             },
-        },
+        }
     ]

     @classmethod
@@ -80,18 +84,34 @@ class BrightcoveIE(InfoExtractor):
         object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
-        object_str = object_str.replace(u'<--', u'<!--')
+        object_str = object_str.replace('<--', '<!--')
+        object_str = fix_xml_ampersands(object_str)

         object_doc = xml.etree.ElementTree.fromstring(object_str)
-        assert u'BrightcoveExperience' in object_doc.attrib['class']
-        params = {'flashID': object_doc.attrib['id'],
-                  'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
-                  }
+
+        fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
+        if fv_el is not None:
+            flashvars = dict(
+                (k, v[0])
+                for k, v in compat_parse_qs(fv_el.attrib['value']).items())
+        else:
+            flashvars = {}
+
         def find_param(name):
+            if name in flashvars:
+                return flashvars[name]
             node = find_xpath_attr(object_doc, './param', 'name', name)
             if node is not None:
                 return node.attrib['value']
             return None
+
+        params = {}
+
+        playerID = find_param('playerID')
+        if playerID is None:
+            raise ExtractorError('Cannot find player ID')
+        params['playerID'] = playerID
+
         playerKey = find_param('playerKey')
         # Not all pages define this value
         if playerKey is not None:
@@ -108,18 +128,32 @@ class BrightcoveIE(InfoExtractor):

     @classmethod
     def _extract_brightcove_url(cls, webpage):
-        """Try to extract the brightcove url from the wepbage, returns None
+        """Try to extract the brightcove url from the webpage, returns None
         if it can't be found
         """
-        m_brightcove = re.search(
-            r'<object[^>]+?class=([\'"])[^>]*?BrightcoveExperience.*?\1.+?</object>',
-            webpage, re.DOTALL)
-        if m_brightcove is not None:
-            return cls._build_brighcove_url(m_brightcove.group())
-        else:
-            return None
+        urls = cls._extract_brightcove_urls(webpage)
+        return urls[0] if urls else None
+
+    @classmethod
+    def _extract_brightcove_urls(cls, webpage):
+        """Return a list of all Brightcove URLs from the webpage """
+
+        url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
+        if url_m:
+            return [unescapeHTML(url_m.group(1))]
+
+        matches = re.findall(
+            r'''(?sx)<object
+            (?:
+                [^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
+                [^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
+            ).+?</object>''',
+            webpage)
+        return [cls._build_brighcove_url(m) for m in matches]

     def _real_extract(self, url):
+        url, smuggled_data = unsmuggle_url(url, {})
+
         # Change the 'videoId' and others field to '@videoPlayer'
         url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
         # Change bckey (used by bcove.me urls) to playerKey
@@ -130,9 +164,10 @@ class BrightcoveIE(InfoExtractor):

         videoPlayer = query.get('@videoPlayer')
         if videoPlayer:
-            return self._get_video_info(videoPlayer[0], query_str, query,
-                                        # We set the original url as the default 'Referer' header
-                                        referer=url)
+            # We set the original url as the default 'Referer' header
+            referer = smuggled_data.get('Referer', url)
+            return self._get_video_info(
+                videoPlayer[0], query_str, query, referer=referer)
         else:
             player_key = query['playerKey']
             return self._get_playlist_info(player_key[0])
@@ -151,16 +186,18 @@ class BrightcoveIE(InfoExtractor):
         info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
         info = json.loads(info)['data']
         video_info = info['programmedContent']['videoPlayer']['mediaDTO']
+        video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
+
         return self._extract_video_info(video_info)

     def _get_playlist_info(self, player_key):
-        playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
-                                               player_key, u'Downloading playlist information')
+        info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
+        playlist_info = self._download_webpage(
+            info_url, player_key, 'Downloading playlist information')

         json_data = json.loads(playlist_info)
         if 'videoList' not in json_data:
-            raise ExtractorError(u'Empty playlist')
+            raise ExtractorError('Empty playlist')
         playlist_info = json_data['videoList']
         videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]

@@ -170,7 +207,7 @@ class BrightcoveIE(InfoExtractor):
     def _extract_video_info(self, video_info):
         info = {
             'id': compat_str(video_info['id']),
-            'title': video_info['displayName'],
+            'title': video_info['displayName'].strip(),
             'description': video_info.get('shortDescription'),
             'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
             'uploader': video_info.get('publisherName'),
@@ -188,6 +225,23 @@ class BrightcoveIE(InfoExtractor):
             info.update({
                 'url': video_info['FLVFullLengthURL'],
             })
-        else:
-            raise ExtractorError(u'Unable to extract video url for %s' % info['id'])
+
+        if self._downloader.params.get('include_ads', False):
+            adServerURL = video_info.get('_youtubedl_adServerURL')
+            if adServerURL:
+                ad_info = {
+                    '_type': 'url',
+                    'url': adServerURL,
+                }
+                if 'url' in info:
+                    return {
+                        '_type': 'playlist',
+                        'title': info['title'],
+                        'entries': [ad_info, info],
+                    }
+                else:
+                    return ad_info
+
+        if 'url' not in info and not info.get('formats'):
+            raise ExtractorError('Unable to extract video url for %s' % info['id'])
         return info
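Note: the flashVars support added above treats the param value as a URL query string. parse_qs returns a list per key, so the dict comprehension keeps the first value; a sketch with a made-up value (youtube-dl's compat_parse_qs wraps the stdlib function):

    from urllib.parse import parse_qs

    value = 'playerID=123&%40videoPlayer=456'  # %40 decodes to '@'
    flashvars = dict((k, v[0]) for k, v in parse_qs(value).items())
    # -> {'playerID': '123', '@videoPlayer': '456'}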
@@ -1,21 +1,21 @@
 # coding: utf-8
+from __future__ import unicode_literals

 import re
 import json

 from .common import InfoExtractor
-from ..utils import determine_ext


 class C56IE(InfoExtractor):
     _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)'
-    IE_NAME = u'56.com'
+    IE_NAME = '56.com'

     _TEST = {
-        u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html',
-        u'file': u'93440716.flv',
-        u'md5': u'e59995ac63d0457783ea05f93f12a866',
-        u'info_dict': {
-            u'title': u'网事知多少 第32期:车怒',
+        'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
+        'file': '93440716.flv',
+        'md5': 'e59995ac63d0457783ea05f93f12a866',
+        'info_dict': {
+            'title': '网事知多少 第32期:车怒',
         },
     }

@@ -23,14 +23,18 @@ class C56IE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
         text_id = mobj.group('textid')
         info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id,
-                                           text_id, u'Downloading video info')
+                                           text_id, 'Downloading video info')
         info = json.loads(info_page)['info']
-        best_format = sorted(info['rfiles'], key=lambda f: int(f['filesize']))[-1]
-        video_url = best_format['url']
+        formats = [{
+            'format_id': f['type'],
+            'filesize': int(f['filesize']),
+            'url': f['url']
+        } for f in info['rfiles']]
+        self._sort_formats(formats)

-        return {'id': info['vid'],
+        return {
+            'id': info['vid'],
             'title': info['Subject'],
-            'url': video_url,
-            'ext': determine_ext(video_url),
+            'formats': formats,
             'thumbnail': info.get('bimg') or info.get('img'),
         }
@@ -1,4 +1,4 @@
-# encoding: utf-8
+from __future__ import unicode_literals

 import re

@@ -13,36 +13,38 @@ class Channel9IE(InfoExtractor):
     meta Search.PageType from web page HTML rather than URL itself, as it is
     not always possible to do.
     '''
-    IE_DESC = u'Channel 9'
-    IE_NAME = u'channel9'
-    _VALID_URL = r'^https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'
+    IE_DESC = 'Channel 9'
+    IE_NAME = 'channel9'
+    _VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'

     _TESTS = [
         {
-            u'url': u'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
-            u'file': u'Events_TechEd_Australia_2013_KOS002.mp4',
-            u'md5': u'bbd75296ba47916b754e73c3a4bbdf10',
-            u'info_dict': {
-                u'title': u'Developer Kick-Off Session: Stuff We Love',
-                u'description': u'md5:c08d72240b7c87fcecafe2692f80e35f',
-                u'duration': 4576,
-                u'thumbnail': u'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
-                u'session_code': u'KOS002',
-                u'session_day': u'Day 1',
-                u'session_room': u'Arena 1A',
-                u'session_speakers': [ u'Ed Blankenship', u'Andrew Coates', u'Brady Gaster', u'Patrick Klug', u'Mads Kristensen' ],
+            'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
+            'md5': 'bbd75296ba47916b754e73c3a4bbdf10',
+            'info_dict': {
+                'id': 'Events/TechEd/Australia/2013/KOS002',
+                'ext': 'mp4',
+                'title': 'Developer Kick-Off Session: Stuff We Love',
+                'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
+                'duration': 4576,
+                'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+                'session_code': 'KOS002',
+                'session_day': 'Day 1',
+                'session_room': 'Arena 1A',
+                'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
             },
         },
         {
-            u'url': u'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
-            u'file': u'posts_Self-service-BI-with-Power-BI-nuclear-testing.mp4',
-            u'md5': u'b43ee4529d111bc37ba7ee4f34813e68',
-            u'info_dict': {
-                u'title': u'Self-service BI with Power BI - nuclear testing',
-                u'description': u'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
-                u'duration': 1540,
-                u'thumbnail': u'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
-                u'authors': [ u'Mike Wilmot' ],
+            'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
+            'md5': 'b43ee4529d111bc37ba7ee4f34813e68',
+            'info_dict': {
+                'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing',
+                'ext': 'mp4',
+                'title': 'Self-service BI with Power BI - nuclear testing',
+                'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
+                'duration': 1540,
+                'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+                'authors': [ 'Mike Wilmot' ],
             },
         }
     ]
@@ -60,7 +62,7 @@ class Channel9IE(InfoExtractor):
             return 0
         units = m.group('units')
         try:
-            exponent = [u'B', u'KB', u'MB', u'GB', u'TB', u'PB', u'EB', u'ZB', u'YB'].index(units.upper())
+            exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper())
         except ValueError:
             return 0
         size = float(m.group('size'))
@@ -76,21 +78,25 @@ class Channel9IE(InfoExtractor):
             </div>)? # File size part may be missing
         '''
         # Extract known formats
-        formats = [{'url': x.group('url'),
+        formats = [{
+            'url': x.group('url'),
             'format_id': x.group('quality'),
             'format_note': x.group('note'),
             'format': '%s (%s)' % (x.group('quality'), x.group('note')),
             'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'preference': self._known_formats.index(x.group('quality')),
+            'vcodec': 'none' if x.group('note') == 'Audio only' else None,
         } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
-        # Sort according to known formats list
-        formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id']))
+
+        self._sort_formats(formats)

         return formats

     def _extract_title(self, html):
-        title = self._html_search_meta(u'title', html, u'title')
+        title = self._html_search_meta('title', html, 'title')
         if title is None:
             title = self._og_search_title(html)
-        TITLE_SUFFIX = u' (Channel 9)'
+        TITLE_SUFFIX = ' (Channel 9)'
         if title is not None and title.endswith(TITLE_SUFFIX):
             title = title[:-len(TITLE_SUFFIX)]
         return title
@@ -106,7 +112,7 @@ class Channel9IE(InfoExtractor):
         m = re.search(DESCRIPTION_REGEX, html)
         if m is not None:
             return m.group('description')
-        return self._html_search_meta(u'description', html, u'description')
+        return self._html_search_meta('description', html, 'description')

     def _extract_duration(self, html):
         m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
@@ -168,7 +174,7 @@ class Channel9IE(InfoExtractor):

         # Nothing to download
         if len(formats) == 0 and slides is None and zip_ is None:
-            self._downloader.report_warning(u'None of recording, slides or zip are available for %s' % content_path)
+            self._downloader.report_warning('None of recording, slides or zip are available for %s' % content_path)
             return

         # Extract meta
@@ -240,7 +246,7 @@ class Channel9IE(InfoExtractor):
         return contents

     def _extract_list(self, content_path):
-        rss = self._download_xml(self._RSS_URL % content_path, content_path, u'Downloading RSS')
+        rss = self._download_xml(self._RSS_URL % content_path, content_path, 'Downloading RSS')
         entries = [self.url_result(session_url.text, 'Channel9')
                    for session_url in rss.findall('./channel/item/link')]
         title_text = rss.find('./channel/title').text
@@ -250,11 +256,11 @@ class Channel9IE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         content_path = mobj.group('contentpath')

-        webpage = self._download_webpage(url, content_path, u'Downloading web page')
+        webpage = self._download_webpage(url, content_path, 'Downloading web page')

         page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
         if page_type_m is None:
-            raise ExtractorError(u'Search.PageType not found, don\'t know how to process this page', expected=True)
+            raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)

         page_type = page_type_m.group('pagetype')
         if page_type == 'List': # List page, may contain list of 'item'-like objects
@@ -264,4 +270,4 @@ class Channel9IE(InfoExtractor):
         elif page_type == 'Session': # Event session page, may contain downloadable content
             return self._extract_session(webpage, content_path)
         else:
-            raise ExtractorError(u'Unexpected Search.PageType %s' % page_type, expected=True)
+            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)
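Note: _restore_bytes, referenced in the hunk above, turns a human-readable size plus unit back into an approximate byte count via the unit's position in the exponent list. A standalone sketch:

    def restore_bytes(size, units):
        # e.g. restore_bytes('8.5', 'MB') -> 8912896; approximate by design,
        # since the page only shows a rounded figure
        exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper())
        return int(float(size) * 1024 ** exponent)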
youtube_dl/extractor/chilloutzone.py (new file, 97 lines)
@@ -0,0 +1,97 @@
+from __future__ import unicode_literals
+
+import re
+import base64
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    clean_html,
+    ExtractorError
+)
+
+
+class ChilloutzoneIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w|-]+)\.html'
+    _TESTS = [{
+        'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',
+        'md5': 'a76f3457e813ea0037e5244f509e66d1',
+        'info_dict': {
+            'id': 'enemene-meck-alle-katzen-weg',
+            'ext': 'mp4',
+            'title': 'Enemene Meck - Alle Katzen weg',
+            'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?',
+        },
+    }, {
+        'note': 'Video hosted at YouTube',
+        'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html',
+        'info_dict': {
+            'id': '1YVQaAgHyRU',
+            'ext': 'mp4',
+            'title': '16 Photos Taken 1 Second Before Disaster',
+            'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814',
+            'uploader': 'BuzzFeedVideo',
+            'uploader_id': 'BuzzFeedVideo',
+            'upload_date': '20131105',
+        },
+    }, {
+        'note': 'Video hosted at Vimeo',
+        'url': 'http://www.chilloutzone.net/video/icon-blending.html',
+        'md5': '2645c678b8dc4fefcc0e1b60db18dac1',
+        'info_dict': {
+            'id': '85523671',
+            'ext': 'mp4',
+            'title': 'The Sunday Times - Icons',
+            'description': 'md5:3e1c0dc6047498d6728dcdaad0891762',
+            'uploader': 'Us',
+            'uploader_id': 'usfilms',
+            'upload_date': '20140131'
+        },
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        base64_video_info = self._html_search_regex(
+            r'var cozVidData = "(.+?)";', webpage, 'video data')
+        decoded_video_info = base64.b64decode(base64_video_info).decode("utf-8")
+        video_info_dict = json.loads(decoded_video_info)
+
+        # get video information from dict
+        video_url = video_info_dict['mediaUrl']
+        description = clean_html(video_info_dict.get('description'))
+        title = video_info_dict['title']
+        native_platform = video_info_dict['nativePlatform']
+        native_video_id = video_info_dict['nativeVideoId']
+        source_priority = video_info_dict['sourcePriority']
+
+        # If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)
+        if native_platform is None:
+            youtube_url = self._html_search_regex(
+                r'<iframe.* src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
+                webpage, 'fallback video URL', default=None)
+            if youtube_url is not None:
+                return self.url_result(youtube_url, ie='Youtube')
+
+        # Non Fallback: Decide to use native source (e.g. youtube or vimeo) or
+        # the own CDN
+        if source_priority == 'native':
+            if native_platform == 'youtube':
+                return self.url_result(native_video_id, ie='Youtube')
+            if native_platform == 'vimeo':
+                return self.url_result(
+                    'http://vimeo.com/' + native_video_id, ie='Vimeo')
+
+        if not video_url:
+            raise ExtractorError('No video found')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': description,
+        }
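Note: the new chilloutzone extractor decodes the page's cozVidData value as base64-encoded JSON. A round-trip sketch with a made-up blob standing in for the page data:

    import base64
    import json

    blob = base64.b64encode(json.dumps(
        {'mediaUrl': 'http://example.com/v.mp4', 'title': 'demo'}).encode('utf-8'))
    video_info = json.loads(base64.b64decode(blob).decode('utf-8'))
    assert video_info['title'] == 'demo'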
youtube_dl/extractor/cliphunter.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+translation_table = {
+    'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
+    'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
+    'y': 'l', 'z': 'i',
+    '$': ':', '&': '.', '(': '=', '^': '&', '=': '/',
+}
+
+
+class CliphunterIE(InfoExtractor):
+    IE_NAME = 'cliphunter'
+
+    _VALID_URL = r'''(?x)http://(?:www\.)?cliphunter\.com/w/
+        (?P<id>[0-9]+)/
+        (?P<seo>.+?)(?:$|[#\?])
+    '''
+    _TEST = {
+        'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
+        'file': '1012420.flv',
+        'md5': '15e7740f30428abf70f4223478dc1225',
+        'info_dict': {
+            'title': 'Fun Jynx Maze solo',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        pl_fiji = self._search_regex(
+            r'pl_fiji = \'([^\']+)\'', webpage, 'video data')
+        pl_c_qual = self._search_regex(
+            r'pl_c_qual = "(.)"', webpage, 'video quality')
+        video_title = self._search_regex(
+            r'mediaTitle = "([^"]+)"', webpage, 'title')
+
+        video_url = ''.join(translation_table.get(c, c) for c in pl_fiji)
+
+        formats = [{
+            'url': video_url,
+            'format_id': pl_c_qual,
+        }]
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'formats': formats,
+        }
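Note: the new cliphunter extractor unscrambles pl_fiji with a per-character substitution table, leaving unmapped characters untouched. A sketch with a made-up scrambled string:

    translation_table = {
        'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
        'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
        'y': 'l', 'z': 'i',
        '$': ':', '&': '.', '(': '=', '^': '&', '=': '/',
    }

    scrambled = 'aqqv$==ezidf&ldq'  # made-up input, not real site data
    print(''.join(translation_table.get(c, c) for c in scrambled))
    # -> http://video.net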
@@ -3,7 +3,7 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     find_xpath_attr,
-    fix_xml_all_ampersand,
+    fix_xml_ampersands
 )


@@ -33,7 +33,7 @@ class ClipsyndicateIE(InfoExtractor):
         pdoc = self._download_xml(
             'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars,
             video_id, u'Downloading video info',
-            transform_source=fix_xml_all_ampersand)
+            transform_source=fix_xml_ampersands)

         track_doc = pdoc.find('trackList/track')
         def find_param(name):
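Note: the change above is just a rename to the fix_xml_ampersands helper. A minimal stand-in with the same intent escapes bare '&' characters that do not already start an entity, so the playlist XML parses:

    import re

    def fix_xml_ampersands(xml_str):
        # escape lone '&' but leave existing entities alone
        return re.sub(
            r'&(?!amp;|lt;|gt;|apos;|quot;|#x?[0-9a-fA-F]+;)',
            '&amp;', xml_str)

    print(fix_xml_ampersands('<a b="x & y">&amp; kept</a>'))
    # -> <a b="x &amp; y">&amp; kept</a>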
youtube_dl/extractor/cmt.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+from .mtv import MTVIE
+
+class CMTIE(MTVIE):
+    IE_NAME = u'cmt.com'
+    _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'
+    _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/'
+
+    _TESTS = [
+        {
+            u'url': u'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
+            u'md5': u'e6b7ef3c4c45bbfae88061799bbba6c2',
+            u'info_dict': {
+                u'id': u'989124',
+                u'ext': u'mp4',
+                u'title': u'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
+                u'description': u'Blame It All On My Roots',
+            },
+        },
+    ]
@@ -1,7 +1,13 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
youtube_dl/extractor/cnn.py

 from .common import InfoExtractor
-from ..utils import determine_ext
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    url_basename,
+)
 
 
 class CNNIE(InfoExtractor):
@@ -9,21 +15,24 @@ class CNNIE(InfoExtractor):
         (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
 
     _TESTS = [{
-        u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
-        u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
-        u'md5': u'3e6121ea48df7e2259fe73a0628605c4',
-        u'info_dict': {
-            u'title': u'Nadal wins 8th French Open title',
-            u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
+        'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
+        'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
+        'md5': '3e6121ea48df7e2259fe73a0628605c4',
+        'info_dict': {
+            'title': 'Nadal wins 8th French Open title',
+            'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
+            'duration': 135,
+            'upload_date': '20130609',
         },
     },
     {
-        u"url": u"http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
-        u"file": u"us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
-        u"md5": u"b5cc60c60a3477d185af8f19a2a26f4e",
-        u"info_dict": {
-            u"title": "Student's epic speech stuns new freshmen",
-            u"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\""
+        "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
+        "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
+        "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
+        "info_dict": {
+            "title": "Student's epic speech stuns new freshmen",
+            "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
+            "upload_date": "20130821",
         }
     }]
 
@@ -31,26 +40,87 @@ class CNNIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         path = mobj.group('path')
         page_title = mobj.group('title')
-        info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path
+        info_url = 'http://cnn.com/video/data/3.0/%s/index.xml' % path
         info = self._download_xml(info_url, page_title)
 
         formats = []
+        rex = re.compile(r'''(?x)
+            (?P<width>[0-9]+)x(?P<height>[0-9]+)
+            (?:_(?P<bitrate>[0-9]+)k)?
+        ''')
         for f in info.findall('files/file'):
-            mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?',f.attrib['bitrate'])
-            if mf is not None:
-                formats.append((int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0), f.text))
-        formats = sorted(formats)
-        (_,_,_, video_path) = formats[-1]
-        video_url = 'http://ht.cdn.turner.com/cnn/big%s' % video_path
+            video_url = 'http://ht.cdn.turner.com/cnn/big%s' % (f.text.strip())
+            fdct = {
+                'format_id': f.attrib['bitrate'],
+                'url': video_url,
+            }
+
+            mf = rex.match(f.attrib['bitrate'])
+            if mf:
+                fdct['width'] = int(mf.group('width'))
+                fdct['height'] = int(mf.group('height'))
+                fdct['tbr'] = int_or_none(mf.group('bitrate'))
+            else:
+                mf = rex.search(f.text)
+                if mf:
+                    fdct['width'] = int(mf.group('width'))
+                    fdct['height'] = int(mf.group('height'))
+                    fdct['tbr'] = int_or_none(mf.group('bitrate'))
+                else:
+                    mi = re.match(r'ios_(audio|[0-9]+)$', f.attrib['bitrate'])
+                    if mi:
+                        if mi.group(1) == 'audio':
+                            fdct['vcodec'] = 'none'
+                            fdct['ext'] = 'm4a'
+                        else:
+                            fdct['tbr'] = int(mi.group(1))
+
+            formats.append(fdct)
+
+        self._sort_formats(formats)
 
         thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
         thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
 
-        return {'id': info.attrib['id'],
+        metas_el = info.find('metas')
+        upload_date = (
+            metas_el.attrib.get('version') if metas_el is not None else None)
+
+        duration_el = info.find('length')
+        duration = parse_duration(duration_el.text)
+
+        return {
+            'id': info.attrib['id'],
             'title': info.find('headline').text,
-            'url': video_url,
-            'ext': determine_ext(video_url),
+            'formats': formats,
             'thumbnail': thumbnails[-1][1],
             'thumbnails': thumbs_dict,
             'description': info.find('description').text,
+            'duration': duration,
+            'upload_date': upload_date,
+        }
+
+
+class CNNBlogsIE(InfoExtractor):
+    _VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+'
+    _TEST = {
+        'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/',
+        'md5': '3e56f97b0b6ffb4b79f4ea0749551084',
+        'info_dict': {
+            'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn',
+            'ext': 'mp4',
+            'title': 'Criminalizing journalism?',
+            'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.',
+            'upload_date': '20140209',
+        },
+        'add_ie': ['CNN'],
+    }
+
+    def _real_extract(self, url):
+        webpage = self._download_webpage(url, url_basename(url))
+        cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url')
+        return {
+            '_type': 'url',
+            'url': cnn_url,
+            'ie_key': CNNIE.ie_key(),
         }
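A quick way to sanity-check the new bitrate-attribute parsing in CNNIE above: the verbose regex handles `WxH` and `WxH_RATEk` ids, and the `ios_*` fallback covers the audio-only and numeric cases. A minimal standalone sketch; the sample attribute values are invented for illustration, not taken from CNN's real feed:

    import re

    rex = re.compile(r'''(?x)
        (?P<width>[0-9]+)x(?P<height>[0-9]+)
        (?:_(?P<bitrate>[0-9]+)k)?
    ''')

    for attr in ('640x360_900k', '416x234', 'ios_audio', 'ios_650'):
        mf = rex.match(attr)
        if mf:
            # the bitrate group is optional, so it may be None here
            print(attr, '->', mf.group('width'), mf.group('height'), mf.group('bitrate'))
        else:
            mi = re.match(r'ios_(audio|[0-9]+)$', attr)
            if mi:
                kind = mi.group(1)
                print(attr, '->', 'audio only' if kind == 'audio' else 'tbr=' + kind)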
youtube_dl/extractor/collegehumor.py

@@ -1,82 +1,99 @@
+from __future__ import unicode_literals
+
+import json
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    compat_urllib_parse_urlparse,
-    determine_ext,
+from ..utils import int_or_none
 
-    ExtractorError,
-)
 
 
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
     _TESTS = [{
-        u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
-        u'file': u'6902724.mp4',
-        u'md5': u'1264c12ad95dca142a9f0bf7968105a0',
-        u'info_dict': {
-            u'title': u'Comic-Con Cosplay Catastrophe',
-            u'description': u'Fans get creative this year at San Diego. Too creative. And yes, that\'s really Joss Whedon.',
+        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
+        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
+        'info_dict': {
+            'id': '6902724',
+            'ext': 'mp4',
+            'title': 'Comic-Con Cosplay Catastrophe',
+            'description': 'Fans get creative this year',
+            'age_limit': 13,
         },
     },
     {
-        u'url': u'http://www.collegehumor.com/video/3505939/font-conference',
-        u'file': u'3505939.mp4',
-        u'md5': u'c51ca16b82bb456a4397987791a835f5',
-        u'info_dict': {
-            u'title': u'Font Conference',
-            u'description': u'This video wasn\'t long enough, so we made it double-spaced.',
+        'url': 'http://www.collegehumor.com/video/3505939/font-conference',
+        'md5': '72fa701d8ef38664a4dbb9e2ab721816',
+        'info_dict': {
+            'id': '3505939',
+            'ext': 'mp4',
+            'title': 'Font Conference',
+            'description': 'This video wasn\'t long enough,',
+            'age_limit': 10,
+            'duration': 179,
         },
-    }]
+    },
+    # embedded youtube video
+    {
+        'url': 'http://www.collegehumor.com/embed/6950457',
+        'info_dict': {
+            'id': 'W5gMp3ZjYg4',
+            'ext': 'mp4',
+            'title': 'Funny Dogs Protecting Babies Compilation 2014 [NEW HD]',
+            'uploader': 'Funnyplox TV',
+            'uploader_id': 'funnyploxtv',
+            'description': 'md5:7ded37421526d54afdf005e25bc2b7a3',
+            'upload_date': '20140128',
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'add_ie': ['Youtube'],
+    },
+    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group('videoid')
 
-        info = {
-            'id': video_id,
-            'uploader': None,
-            'upload_date': None,
-        }
+        jsonUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id + '.json'
+        data = json.loads(self._download_webpage(
+            jsonUrl, video_id, 'Downloading info JSON'))
+        vdata = data['video']
+
+        if vdata.get('youtubeId') is not None:
+            return {
+                '_type': 'url',
+                'url': vdata['youtubeId'],
+                'ie_key': 'Youtube',
+            }
 
-        self.report_extraction(video_id)
-        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
-        mdoc = self._download_xml(xmlUrl, video_id,
-                                  u'Downloading info XML',
-                                  u'Unable to download video info XML')
-
-        try:
-            videoNode = mdoc.findall('./video')[0]
-            youtubeIdNode = videoNode.find('./youtubeID')
-            if youtubeIdNode is not None:
-                return self.url_result(youtubeIdNode.text, 'Youtube')
-            info['description'] = videoNode.findall('./description')[0].text
-            info['title'] = videoNode.findall('./caption')[0].text
-            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
-            next_url = videoNode.findall('./file')[0].text
-        except IndexError:
-            raise ExtractorError(u'Invalid metadata XML file')
-
-        if next_url.endswith(u'manifest.f4m'):
-            manifest_url = next_url + '?hdcore=2.10.3'
-            adoc = self._download_xml(manifest_url, video_id,
-                                      u'Downloading XML manifest',
-                                      u'Unable to download video info XML')
-
-            try:
-                video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
-            except IndexError:
-                raise ExtractorError(u'Invalid manifest file')
-            url_pr = compat_urllib_parse_urlparse(info['thumbnail'])
-            info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','')
-            info['ext'] = 'mp4'
+        AGE_LIMITS = {'nc17': 18, 'r': 18, 'pg13': 13, 'pg': 10, 'g': 0}
+        rating = vdata.get('rating')
+        if rating:
+            age_limit = AGE_LIMITS.get(rating.lower())
         else:
-            # Old-style direct links
-            info['url'] = next_url
-            info['ext'] = determine_ext(info['url'])
+            age_limit = None  # None = No idea
 
-        return info
+        PREFS = {'high_quality': 2, 'low_quality': 0}
+        formats = []
+        for format_key in ('mp4', 'webm'):
+            for qname, qurl in vdata.get(format_key, {}).items():
+                formats.append({
+                    'format_id': format_key + '_' + qname,
+                    'url': qurl,
+                    'format': format_key,
+                    'preference': PREFS.get(qname),
+                })
+        self._sort_formats(formats)
+
+        duration = int_or_none(vdata.get('duration'), 1000)
+
+        return {
+            'id': video_id,
+            'title': vdata['title'],
+            'description': vdata.get('description'),
+            'thumbnail': vdata.get('thumbnail'),
+            'formats': formats,
+            'age_limit': age_limit,
+            'duration': duration,
+        }
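The collegehumor rewrite drops the XML/f4m scraping entirely and maps the moogaloop JSON straight onto format dicts, deferring ordering to the new `_sort_formats` (see the common.py diff further down) via the `preference` field. A minimal sketch with a made-up `vdata` payload that mirrors the keys the extractor reads:

    PREFS = {'high_quality': 2, 'low_quality': 0}

    # hypothetical stand-in for data['video'] from the moogaloop JSON
    vdata = {
        'mp4': {'high_quality': 'http://example.com/hq.mp4',
                'low_quality': 'http://example.com/lq.mp4'},
        'webm': {'high_quality': 'http://example.com/hq.webm'},
    }

    formats = []
    for format_key in ('mp4', 'webm'):
        for qname, qurl in vdata.get(format_key, {}).items():
            formats.append({
                'format_id': format_key + '_' + qname,
                'url': qurl,
                'format': format_key,
                'preference': PREFS.get(qname),  # None for unknown quality names
            })
    # the real extractor then calls self._sort_formats(formats);
    # higher preference sorts later, i.e. better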
youtube_dl/extractor/comedycentral.py

@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -12,31 +14,25 @@ from ..utils import (
 
 
 class ComedyCentralIE(MTVServicesInfoExtractor):
-    _VALID_URL = r'https?://(?:www.)?comedycentral.com/(video-clips|episodes|cc-studios)/(?P<title>.*)'
-    _FEED_URL = u'http://comedycentral.com/feeds/mrss/'
+    _VALID_URL = r'''(?x)https?://(?:www\.)?comedycentral\.com/
+        (video-clips|episodes|cc-studios|video-collections)
+        /(?P<title>.*)'''
+    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'
 
     _TEST = {
-        u'url': u'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
-        u'md5': u'4167875aae411f903b751a21f357f1ee',
-        u'info_dict': {
-            u'id': u'cef0cbb3-e776-4bc9-b62e-8016deccb354',
-            u'ext': u'mp4',
-            u'title': u'Uncensored - Greg Fitzsimmons - Too Good of a Mother',
-            u'description': u'After a certain point, breastfeeding becomes c**kblocking.',
+        'url': 'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
+        'md5': '4167875aae411f903b751a21f357f1ee',
+        'info_dict': {
+            'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
+            'ext': 'mp4',
+            'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
+            'description': 'After a certain point, breastfeeding becomes c**kblocking.',
         },
     }
 
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        title = mobj.group('title')
-        webpage = self._download_webpage(url, title)
-        mgid = self._search_regex(r'data-mgid="(?P<mgid>mgid:.*?)"',
-                                  webpage, u'mgid')
-        return self._get_videos_info(mgid)
-
 
 class ComedyCentralShowsIE(InfoExtractor):
-    IE_DESC = u'The Daily Show / Colbert Report'
+    IE_DESC = 'The Daily Show / Colbert Report'
     # urls can be abbreviations like :thedailyshow or :colbert
     # urls for episodes like:
     # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
@@ -53,14 +49,14 @@ class ComedyCentralShowsIE(InfoExtractor):
                           extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?)))
                      $"""
     _TEST = {
-        u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
-        u'file': u'422212.mp4',
-        u'md5': u'4e2f5cb088a83cd8cdb7756132f9739d',
-        u'info_dict': {
-            u"upload_date": u"20121214",
-            u"description": u"Kristen Stewart",
-            u"uploader": u"thedailyshow",
-            u"title": u"thedailyshow-kristen-stewart part 1"
+        'url': 'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
+        'file': '422212.mp4',
+        'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
+        'info_dict': {
+            "upload_date": "20121214",
+            "description": "Kristen Stewart",
+            "uploader": "thedailyshow",
+            "title": "thedailyshow-kristen-stewart part 1"
         }
     }
 
@@ -90,22 +86,22 @@ class ComedyCentralShowsIE(InfoExtractor):
 
     @staticmethod
     def _transform_rtmp_url(rtmp_video_url):
-        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
+        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
         if not m:
-            raise ExtractorError(u'Cannot transform RTMP url')
+            raise ExtractorError('Cannot transform RTMP url')
         base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
         return base + m.group('finalid')
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)
 
         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):
-                url = u'http://www.thedailyshow.com/full-episodes/'
+                url = 'http://www.thedailyshow.com/full-episodes/'
             else:
-                url = u'http://www.colbertnation.com/full-episodes/'
+                url = 'http://www.colbertnation.com/full-episodes/'
             mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             assert mobj is not None
 
@@ -131,9 +127,9 @@ class ComedyCentralShowsIE(InfoExtractor):
             url = htmlHandle.geturl()
             mobj = re.match(self._VALID_URL, url, re.VERBOSE)
             if mobj is None:
-                raise ExtractorError(u'Invalid redirected URL: ' + url)
+                raise ExtractorError('Invalid redirected URL: ' + url)
             if mobj.group('episode') == '':
-                raise ExtractorError(u'Redirected URL is still not specific: ' + url)
+                raise ExtractorError('Redirected URL is still not specific: ' + url)
             epTitle = mobj.group('episode')
 
         mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
@@ -145,15 +141,15 @@ class ComedyCentralShowsIE(InfoExtractor):
 
             altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
             if len(altMovieParams) == 0:
-                raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
+                raise ExtractorError('unable to find Flash URL in webpage ' + url)
             else:
                 mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
 
         uri = mMovieParams[0][1]
         indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
         idoc = self._download_xml(indexUrl, epTitle,
-                                  u'Downloading show index',
-                                  u'unable to download episode index')
+                                  'Downloading show index',
+                                  'unable to download episode index')
 
         results = []
 
@@ -168,7 +164,7 @@ class ComedyCentralShowsIE(InfoExtractor):
             configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
                          compat_urllib_parse.urlencode({'uri': mediaId}))
             cdoc = self._download_xml(configUrl, epTitle,
-                                      u'Downloading configuration for %s' % shortMediaId)
+                                      'Downloading configuration for %s' % shortMediaId)
 
             turls = []
             for rendition in cdoc.findall('.//rendition'):
@@ -176,7 +172,7 @@ class ComedyCentralShowsIE(InfoExtractor):
                 turls.append(finfo)
 
             if len(turls) == 0:
-                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
+                self._downloader.report_error('unable to download ' + mediaId + ': No videos found')
                 continue
 
             formats = []
@@ -190,7 +186,7 @@ class ComedyCentralShowsIE(InfoExtractor):
                     'width': w,
                 })
 
-            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
+            effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum+1)
             results.append({
                 'id': shortMediaId,
                 'formats': formats,
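`_transform_rtmp_url` (touched here only to escape the dot in `gsp\.comedystor`) maps an RTMP URL onto the HTTP mirror by keeping just the `gsp.comedystor/...` tail. Shown on a made-up rtmpe URL; the base URL is the real one from the code above:

    import re

    def _transform_rtmp_url(rtmp_video_url):
        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
        assert m is not None, 'Cannot transform RTMP url'
        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
        return base + m.group('finalid')

    # hypothetical input; only the gsp.comedystor/... tail survives the rewrite
    print(_transform_rtmp_url(
        'rtmpe://viacomccstr.fcod.llnwd.net/a500/gsp.comedystor/com/dailyshow/clip_01.mp4'))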
youtube_dl/extractor/common.py

@@ -1,4 +1,6 @@
 import base64
+import hashlib
+import json
 import os
 import re
 import socket
@@ -9,6 +11,7 @@ import xml.etree.ElementTree
 from ..utils import (
     compat_http_client,
     compat_urllib_error,
+    compat_urllib_parse_urlparse,
     compat_str,
 
     clean_html,
@@ -18,6 +21,7 @@ from ..utils import (
     sanitize_filename,
     unescapeHTML,
 )
+_NO_DEFAULT = object()
 
 
 class InfoExtractor(object):
@@ -36,10 +40,12 @@ class InfoExtractor(object):
     id:             Video identifier.
     title:          Video title, unescaped.
 
-    Additionally, it must contain either a formats entry or url and ext:
+    Additionally, it must contain either a formats entry or a url one:
 
-    formats:        A list of dictionaries for each format available, it must
-                    be ordered from worst to best quality. Potential fields:
+    formats:        A list of dictionaries for each format available, ordered
+                    from worst to best quality.
+
+                    Potential fields:
                     * url        Mandatory. The URL of the video file
                     * ext        Will be calculated from url if missing
                     * format     A human-readable description of the format
@@ -47,23 +53,38 @@ class InfoExtractor(object):
                                  Calculated from the format_id, width, height.
                                  and format_note fields if missing.
                     * format_id  A short description of the format
-                                 ("mp4_h264_opus" or "19")
+                                 ("mp4_h264_opus" or "19").
+                                 Technically optional, but strongly recommended.
                     * format_note Additional info about the format
                                  ("3D" or "DASH video")
                     * width      Width of the video, if known
                     * height     Height of the video, if known
+                    * resolution Textual description of width and height
+                    * tbr        Average bitrate of audio and video in KBit/s
                     * abr        Average audio bitrate in KBit/s
                     * acodec     Name of the audio codec in use
+                    * asr        Audio sampling rate in Hertz
                     * vbr        Average video bitrate in KBit/s
                     * vcodec     Name of the video codec in use
+                    * container  Name of the container format
                     * filesize   The number of bytes, if known in advance
                     * player_url SWF Player URL (used for rtmpdump).
+                    * protocol   The protocol that will be used for the actual
+                                 download, lower-case.
+                                 "http", "https", "rtsp", "rtmp", "m3u8" or so.
+                    * preference Order number of this format. If this field is
+                                 present and not None, the formats get sorted
+                                 by this field.
+                                 -1 for default (order by other properties),
+                                 -2 or smaller for less than default.
+                    * quality    Order number of the video quality of this
+                                 format, irrespective of the file format.
+                                 -1 for default (order by other properties),
+                                 -2 or smaller for less than default.
     url:            Final video URL.
     ext:            Video filename extension.
     format:         The video format, defaults to ext (used for --get-format)
     player_url:     SWF Player URL (used for rtmpdump).
-    urlhandle:      [internal] The urlHandle to be used to download the file,
-                    like returned by urllib.request.urlopen
 
     The following fields are optional:
 
@@ -169,6 +190,8 @@ class InfoExtractor(object):
         try:
             return self._downloader.urlopen(url_or_request)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            if errnote is False:
+                return False
             if errnote is None:
                 errnote = u'Unable to download webpage'
             errmsg = u'%s: %s' % (errnote, compat_str(err))
@@ -199,6 +222,8 @@ class InfoExtractor(object):
                           webpage_bytes[:1024])
             if m:
                 encoding = m.group(1).decode('ascii')
+            elif webpage_bytes.startswith(b'\xff\xfe'):
+                encoding = 'utf-16'
             else:
                 encoding = 'utf-8'
         if self._downloader.params.get('dump_intermediate_pages', False):
@@ -214,6 +239,9 @@ class InfoExtractor(object):
                 url = url_or_request.get_full_url()
             except AttributeError:
                 url = url_or_request
+            if len(url) > 200:
+                h = u'___' + hashlib.md5(url.encode('utf-8')).hexdigest()
+                url = url[:200 - len(h)] + h
             raw_filename = ('%s_%s.dump' % (video_id, url))
             filename = sanitize_filename(raw_filename, restricted=True)
             self.to_screen(u'Saving request to ' + filename)
@@ -241,6 +269,23 @@ class InfoExtractor(object):
             xml_string = transform_source(xml_string)
         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
 
+    def _download_json(self, url_or_request, video_id,
+                       note=u'Downloading JSON metadata',
+                       errnote=u'Unable to download JSON metadata',
+                       transform_source=None):
+        json_string = self._download_webpage(url_or_request, video_id, note, errnote)
+        if transform_source:
+            json_string = transform_source(json_string)
+        try:
+            return json.loads(json_string)
+        except ValueError as ve:
+            raise ExtractorError('Failed to download JSON', cause=ve)
+
+    def report_warning(self, msg, video_id=None):
+        idstr = u'' if video_id is None else u'%s: ' % video_id
+        self._downloader.report_warning(
+            u'[%s] %s%s' % (self.IE_NAME, idstr, msg))
+
     def to_screen(self, msg):
         """Print msg to screen, prefixing it with '[ie_name]'"""
         self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
@@ -262,7 +307,8 @@ class InfoExtractor(object):
         self.to_screen(u'Logging in')
 
     #Methods for following #608
-    def url_result(self, url, ie=None, video_id=None):
+    @staticmethod
+    def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
         #TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
@@ -271,7 +317,8 @@ class InfoExtractor(object):
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
-    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+    @staticmethod
+    def playlist_result(entries, playlist_id=None, playlist_title=None):
         """Returns a playlist"""
         video_info = {'_type': 'playlist',
                       'entries': entries}
@@ -281,7 +328,7 @@ class InfoExtractor(object):
             video_info['title'] = playlist_title
         return video_info
 
-    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
+    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
         """
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
@@ -295,7 +342,7 @@ class InfoExtractor(object):
                 mobj = re.search(p, string, flags)
                 if mobj: break
 
-        if sys.stderr.isatty() and os.name != 'nt':
+        if os.name != 'nt' and sys.stderr.isatty():
            _name = u'\033[0;34m%s\033[0m' % name
        else:
            _name = name
@@ -303,7 +350,7 @@ class InfoExtractor(object):
        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
-        elif default is not None:
+        elif default is not _NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError(u'Unable to extract %s' % _name)
@@ -312,7 +359,7 @@ class InfoExtractor(object):
                           u'please report this issue on http://yt-dl.org/bug' % _name)
            return None
 
-    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
+    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
         """
         Like _search_regex, but strips HTML tags and unescapes entities.
         """
@@ -355,8 +402,8 @@ class InfoExtractor(object):
     # Helper functions for extracting OpenGraph info
     @staticmethod
    def _og_regexes(prop):
-        content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')'
-        property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop)
+        content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
+        property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
         template = r'<meta[^>]+?%s[^>]+?%s'
         return [
             template % (property_re, content_re),
@@ -421,6 +468,65 @@ class InfoExtractor(object):
         }
         return RATING_TABLE.get(rating.lower(), None)
 
+    def _twitter_search_player(self, html):
+        return self._html_search_meta('twitter:player', html,
+            'twitter card player')
+
+    def _sort_formats(self, formats):
+        if not formats:
+            raise ExtractorError(u'No video formats found')
+
+        def _formats_key(f):
+            # TODO remove the following workaround
+            from ..utils import determine_ext
+            if not f.get('ext') and 'url' in f:
+                f['ext'] = determine_ext(f['url'])
+
+            preference = f.get('preference')
+            if preference is None:
+                proto = f.get('protocol')
+                if proto is None:
+                    proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
+
+                preference = 0 if proto in ['http', 'https'] else -0.1
+                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
+                    preference -= 0.5
+
+            if f.get('vcodec') == 'none':  # audio only
+                if self._downloader.params.get('prefer_free_formats'):
+                    ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus']
+                else:
+                    ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a']
+                ext_preference = 0
+                try:
+                    audio_ext_preference = ORDER.index(f['ext'])
+                except ValueError:
+                    audio_ext_preference = -1
+            else:
+                if self._downloader.params.get('prefer_free_formats'):
+                    ORDER = [u'flv', u'mp4', u'webm']
+                else:
+                    ORDER = [u'webm', u'flv', u'mp4']
+                try:
+                    ext_preference = ORDER.index(f['ext'])
+                except ValueError:
+                    ext_preference = -1
+                audio_ext_preference = 0
+
+            return (
+                preference,
+                f.get('quality') if f.get('quality') is not None else -1,
+                f.get('height') if f.get('height') is not None else -1,
+                f.get('width') if f.get('width') is not None else -1,
+                ext_preference,
+                f.get('tbr') if f.get('tbr') is not None else -1,
+                f.get('vbr') if f.get('vbr') is not None else -1,
+                f.get('abr') if f.get('abr') is not None else -1,
+                audio_ext_preference,
+                f.get('filesize') if f.get('filesize') is not None else -1,
+                f.get('format_id'),
+            )
+        formats.sort(key=_formats_key)
+
+
 class SearchInfoExtractor(InfoExtractor):
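The `_NO_DEFAULT = object()` sentinel is the subtle part of this common.py change: with the old `default=None` signature, a caller could never ask for "return None when nothing matches", because None was indistinguishable from "no default given". A minimal sketch of the pattern, independent of youtube-dl:

    _NO_DEFAULT = object()  # private sentinel; no caller can pass it by accident

    def search(match, default=_NO_DEFAULT, fatal=True):
        if match is not None:
            return match
        elif default is not _NO_DEFAULT:
            return default  # None is now a legitimate default value
        elif fatal:
            raise ValueError('Unable to extract')
        return None

    assert search('x') == 'x'
    assert search(None, default=None) is None  # honoured, not treated as "unset"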
youtube_dl/extractor/condenast.py

@@ -1,4 +1,5 @@
 # coding: utf-8
+from __future__ import unicode_literals
 
 import re
 import json
@@ -20,30 +21,31 @@ class CondeNastIE(InfoExtractor):
 
     # The keys are the supported sites and the values are the name to be shown
     # to the user and in the extractor description.
-    _SITES = {'wired': u'WIRED',
-              'gq': u'GQ',
-              'vogue': u'Vogue',
-              'glamour': u'Glamour',
-              'wmagazine': u'W Magazine',
-              'vanityfair': u'Vanity Fair',
+    _SITES = {
+        'wired': 'WIRED',
+        'gq': 'GQ',
+        'vogue': 'Vogue',
+        'glamour': 'Glamour',
+        'wmagazine': 'W Magazine',
+        'vanityfair': 'Vanity Fair',
     }
 
-    _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
-    IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
+    _VALID_URL = r'http://(video|www)\.(?P<site>%s)\.com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
+    IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
 
     _TEST = {
-        u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
-        u'file': u'5171b343c2b4c00dd0c1ccb3.mp4',
-        u'md5': u'1921f713ed48aabd715691f774c451f7',
-        u'info_dict': {
-            u'title': u'3D Printed Speakers Lit With LED',
-            u'description': u'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
+        'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
+        'file': '5171b343c2b4c00dd0c1ccb3.mp4',
+        'md5': '1921f713ed48aabd715691f774c451f7',
+        'info_dict': {
+            'title': '3D Printed Speakers Lit With LED',
+            'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
         }
     }
 
     def _extract_series(self, url, webpage):
         title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
-                                        webpage, u'series title', flags=re.DOTALL)
+                                        webpage, 'series title', flags=re.DOTALL)
         url_object = compat_urllib_parse_urlparse(url)
         base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
         m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
@@ -57,35 +59,37 @@ class CondeNastIE(InfoExtractor):
         description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
                                                r'<div class="video-post-content">(.+?)</div>',
                                                ],
-                                              webpage, u'description',
+                                              webpage, 'description',
                                               fatal=False, flags=re.DOTALL)
         params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
-                                    u'player params', flags=re.DOTALL)
-        video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id')
-        player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id')
-        target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target')
+                                    'player params', flags=re.DOTALL)
+        video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
+        player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
+        target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
         data = compat_urllib_parse.urlencode({'videoId': video_id,
                                               'playerId': player_id,
                                               'target': target,
                                               })
         base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
-                                           webpage, u'base info url',
+                                           webpage, 'base info url',
                                            default='http://player.cnevids.com/player/loader.js?')
         info_url = base_info_url + data
         info_page = self._download_webpage(info_url, video_id,
-                                           u'Downloading video info')
-        video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info')
+                                           'Downloading video info')
+        video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info')
         video_info = json.loads(video_info)
 
-        def _formats_sort_key(f):
-            type_ord = 1 if f['type'] == 'video/mp4' else 0
-            quality_ord = 1 if f['quality'] == 'high' else 0
-            return (quality_ord, type_ord)
-        best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1]
+        formats = [{
+            'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
+            'url': fdata['src'],
+            'ext': fdata['type'].split('/')[-1],
+            'quality': 1 if fdata['quality'] == 'high' else 0,
+        } for fdata in video_info['sources'][0]]
+        self._sort_formats(formats)
 
-        return {'id': video_id,
-                'url': best_format['src'],
-                'ext': best_format['type'].split('/')[-1],
+        return {
+            'id': video_id,
+            'formats': formats,
             'title': video_info['title'],
             'thumbnail': video_info['poster_frame'],
             'description': description,
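Same modernization pattern as cnn.py and collegehumor.py: instead of sorting `video_info['sources'][0]` with a local key function and keeping only the best entry, CondeNastIE now emits one format dict per source and defers ranking to `_sort_formats` through the `quality` field. Sketch with an invented sources list that has the keys the code reads:

    # hypothetical stand-in for video_info['sources'][0]
    sources = [
        {'type': 'video/webm', 'quality': 'low', 'src': 'http://example.com/lq.webm'},
        {'type': 'video/mp4', 'quality': 'high', 'src': 'http://example.com/hq.mp4'},
    ]

    formats = [{
        'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
        'url': fdata['src'],
        'ext': fdata['type'].split('/')[-1],
        'quality': 1 if fdata['quality'] == 'high' else 0,
    } for fdata in sources]

    # -> format_ids 'webm-low' and 'mp4-high'; sorting by quality puts
    # mp4-high last, i.e. best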
youtube_dl/extractor/crunchyroll.py (new file, 178 lines)

@@ -0,0 +1,178 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re, base64, zlib
+from hashlib import sha1
+from math import pow, sqrt, floor
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_parse,
+    compat_urllib_request,
+    bytes_to_intlist,
+    intlist_to_bytes,
+    unified_strdate,
+    clean_html,
+)
+from ..aes import (
+    aes_cbc_decrypt,
+    inc,
+)
+
+
+class CrunchyrollIE(InfoExtractor):
+    _VALID_URL = r'(?:https?://)?(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
+    _TESTS = [{
+        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
+        'file': '645513.flv',
+        #'md5': 'b1639fd6ddfaa43788c85f6d1dddd412',
+        'info_dict': {
+            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
+            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
+            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
+            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
+            'upload_date': '20131013',
+        },
+        'params': {
+            # rtmp
+            'skip_download': True,
+        },
+    }]
+
+    _FORMAT_IDS = {
+        '360': ('60', '106'),
+        '480': ('61', '106'),
+        '720': ('62', '106'),
+        '1080': ('80', '108'),
+    }
+
+    def _decrypt_subtitles(self, data, iv, id):
+        data = bytes_to_intlist(data)
+        iv = bytes_to_intlist(iv)
+        id = int(id)
+
+        def obfuscate_key_aux(count, modulo, start):
+            output = list(start)
+            for _ in range(count):
+                output.append(output[-1] + output[-2])
+            # cut off start values
+            output = output[2:]
+            output = list(map(lambda x: x % modulo + 33, output))
+            return output
+
+        def obfuscate_key(key):
+            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
+            num2 = (num1 ^ key) << 5
+            num3 = key ^ num1
+            num4 = num3 ^ (num3 >> 3) ^ num2
+            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
+            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
+            # Extend 160 Bit hash to 256 Bit
+            return shaHash + [0] * 12
+
+        key = obfuscate_key(id)
+        class Counter:
+            __value = iv
+            def next_value(self):
+                temp = self.__value
+                self.__value = inc(self.__value)
+                return temp
+        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
+        return zlib.decompress(decrypted_data)
+
+    def _convert_subtitles_to_srt(self, subtitles):
+        i=1
+        output = ''
+        for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles):
+            start = start.replace('.', ',')
+            end = end.replace('.', ',')
+            text = clean_html(text)
+            text = text.replace('\\N', '\n')
+            if not text:
+                continue
+            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
+            i+=1
+        return output
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('video_id')
+
+        if mobj.group('prefix') == 'm':
+            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
+            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
+        else:
+            webpage_url = 'http://www.' + mobj.group('url')
+
+        webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
+        note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
+        if note_m:
+            raise ExtractorError(note_m)
+
+        video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
+        video_title = re.sub(r' {2,}', ' ', video_title)
+        video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
+        if not video_description:
+            video_description = None
+        video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
+        if video_upload_date:
+            video_upload_date = unified_strdate(video_upload_date)
+        video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
+
+        playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
+        playerdata_req = compat_urllib_request.Request(playerdata_url)
+        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
+        playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')
+
+        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
+        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)
+
+        formats = []
+        for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
+            stream_quality, stream_format = self._FORMAT_IDS[fmt]
+            video_format = fmt+'p'
+            streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
+            # urlencode doesn't work!
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+            streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
+            streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
+            video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
+            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
+            formats.append({
+                'url': video_url,
+                'play_path': video_play_path,
+                'ext': 'flv',
+                'format': video_format,
+                'format_id': video_format,
+            })
+
+        subtitles = {}
+        for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
+                                              video_id, note='Downloading subtitles for '+sub_name)
+            id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
+            iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
+            data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
+            if not id or not iv or not data:
+                continue
+            id = int(id)
+            iv = base64.b64decode(iv)
+            data = base64.b64decode(data)
+
+            subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
+            lang_code = self._search_regex(r'lang_code=\'([^\']+)', subtitle, 'subtitle_lang_code', fatal=False)
+            if not lang_code:
+                continue
+            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'description': video_description,
+            'thumbnail': video_thumbnail,
+            'uploader': video_uploader,
+            'upload_date': video_upload_date,
+            'subtitles': subtitles,
+            'formats': formats,
+        }
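The `_convert_subtitles_to_srt` helper in the new Crunchyroll extractor turns each decrypted `<event ...>` tag into a numbered SRT cue, swapping the decimal point for the comma SRT expects. The timing values here are invented to show the output shape:

    start, end, text = '0:00:01.33', '0:00:04.01', 'An Idol-Wrestler is Born!'
    cue = '%d\n%s --> %s\n%s\n\n' % (
        1, start.replace('.', ','), end.replace('.', ','), text)
    print(cue)
    # 1
    # 0:00:01,33 --> 0:00:04,01
    # An Idol-Wrestler is Born!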
youtube_dl/extractor/cspan.py

@@ -1,51 +1,60 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_urllib_parse,
+    unescapeHTML,
+    find_xpath_attr,
 )
 
 
 class CSpanIE(InfoExtractor):
-    _VALID_URL = r'http://www\.c-spanvideo\.org/program/(.*)'
+    _VALID_URL = r'http://(?:www\.)?c-span\.org/video/\?(?P<id>\d+)'
+    IE_DESC = 'C-SPAN'
     _TEST = {
-        u'url': u'http://www.c-spanvideo.org/program/HolderonV',
-        u'file': u'315139.flv',
-        u'md5': u'74a623266956f69e4df0068ab6c80fe4',
-        u'info_dict': {
-            u"title": u"Attorney General Eric Holder on Voting Rights Act Decision"
+        'url': 'http://www.c-span.org/video/?313572-1/HolderonV',
+        'md5': '8e44ce11f0f725527daccc453f553eb0',
+        'info_dict': {
+            'id': '315139',
+            'ext': 'mp4',
+            'title': 'Attorney General Eric Holder on Voting Rights Act Decision',
+            'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
         },
-        u'skip': u'Requires rtmpdump'
+        'skip': 'Regularly fails on travis, for unknown reasons',
    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        prog_name = mobj.group(1)
-        webpage = self._download_webpage(url, prog_name)
-        video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')
-        data = compat_urllib_parse.urlencode({'programid': video_id,
-                                              'dynamic':'1'})
-        info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data
-        video_info = self._download_webpage(info_url, video_id, u'Downloading video info')
-
-        self.report_extraction(video_id)
-
-        title = self._html_search_regex(r'<string name="title">(.*?)</string>',
-                                        video_info, 'title')
-        description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"',
-                                              webpage, 'description',
-                                              flags=re.MULTILINE|re.DOTALL)
-
-        url = self._search_regex(r'<string name="URL">(.*?)</string>',
-                                 video_info, 'video url')
-        url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443')
-        path = self._search_regex(r'<string name="path">(.*?)</string>',
-                                  video_info, 'rtmp play path')
-
-        return {'id': video_id,
-                'title': title,
-                'ext': 'flv',
+        page_id = mobj.group('id')
+        webpage = self._download_webpage(url, page_id)
+        video_id = self._search_regex(r'data-progid=\'(\d+)\'>', webpage, 'video id')
+
+        description = self._html_search_regex(
+            [
+                # The full description
+                r'<div class=\'expandable\'>(.*?)<a href=\'#\'',
+                # If the description is small enough the other div is not
+                # present, otherwise this is a stripped version
+                r'<p class=\'initial\'>(.*?)</p>'
+            ],
+            webpage, 'description', flags=re.DOTALL)
+
+        info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id
+        data = self._download_json(info_url, video_id)
+
+        url = unescapeHTML(data['video']['files'][0]['path']['#text'])
+
+        doc = self._download_xml('http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
+                                 video_id)
+
+        def find_string(s):
+            return find_xpath_attr(doc, './/string', 'name', s).text
+
+        return {
+            'id': video_id,
+            'title': find_string('title'),
             'url': url,
-            'play_path': path,
             'description': description,
-            'thumbnail': self._og_search_thumbnail(webpage),
+            'thumbnail': find_string('poster'),
         }
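The rewritten CSpanIE leans on `find_xpath_attr` to pull `<string name="...">` nodes out of the flashXml document. The XML snippet below is a made-up miniature of that feed, and the helper is re-implemented inline so the sketch runs without youtube_dl:

    import xml.etree.ElementTree as ET

    doc = ET.fromstring(
        '<program>'
        '<string name="title">Attorney General Eric Holder on Voting Rights Act Decision</string>'
        '<string name="poster">http://example.com/poster.jpg</string>'
        '</program>')

    def find_string(s):
        # stands in for find_xpath_attr(doc, './/string', 'name', s).text
        return next(el for el in doc.iter('string') if el.get('name') == s).text

    print(find_string('title'))
    print(find_string('poster'))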
youtube_dl/extractor/d8.py

@@ -1,22 +1,25 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 from .canalplus import CanalplusIE
 
 
 class D8IE(CanalplusIE):
     _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
     _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
-    IE_NAME = u'd8.tv'
+    IE_NAME = 'd8.tv'
 
     _TEST = {
-        u'url': u'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
-        u'file': u'966289.flv',
-        u'info_dict': {
-            u'title': u'Campagne intime - Documentaire exceptionnel',
-            u'description': u'md5:d2643b799fb190846ae09c61e59a859f',
-            u'upload_date': u'20131108',
+        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
+        'file': '966289.flv',
+        'info_dict': {
+            'title': 'Campagne intime - Documentaire exceptionnel',
+            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
+            'upload_date': '20131108',
         },
-        u'params': {
+        'params': {
             # rtmp
-            u'skip_download': True,
+            'skip_download': True,
         },
+        'skip': 'videos get deleted after a while',
     }
youtube_dl/extractor/defensegouvfr.py

@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -5,15 +7,14 @@ from .common import InfoExtractor
 
 
 class DefenseGouvFrIE(InfoExtractor):
-    _IE_NAME = 'defense.gouv.fr'
+    IE_NAME = 'defense.gouv.fr'
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
                   r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
-        u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/'
-                 u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'),
-        u'file': u'11213.mp4',
-        u'md5': u'75bba6124da7e63d2d60b5244ec9430c',
+        'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
+        'file': '11213.mp4',
+        'md5': '75bba6124da7e63d2d60b5244ec9430c',
         "info_dict": {
             "title": "attaque-chimique-syrienne-du-21-aout-2013-1"
         }
youtube_dl/extractor/discovery.py (new file, 46 lines)

@@ -0,0 +1,46 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+
+
+class DiscoveryIE(InfoExtractor):
+    _VALID_URL = r'http://dsc\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
+    _TEST = {
+        'url': 'http://dsc.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
+        'file': '614784.mp4',
+        'md5': 'e12614f9ee303a6ccef415cb0793eba2',
+        'info_dict': {
+            'title': 'MythBusters: Mission Impossible Outtakes',
+            'description': ('Watch Jamie Hyneman and Adam Savage practice being'
+                ' each other -- to the point of confusing Jamie\'s dog -- and '
+                'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
+                ' back.'),
+            'duration': 156,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
+            webpage, 'video list', flags=re.DOTALL)
+        video_list = json.loads(video_list_json)
+        info = video_list['clips'][0]
+        formats = []
+        for f in info['mp4']:
+            formats.append(
+                {'url': f['src'], r'ext': r'mp4', 'tbr': int(f['bitrate'][:-1])})
+
+        return {
+            'id': info['contentId'],
+            'title': video_list['name'],
+            'formats': formats,
+            'description': info['videoCaption'],
+            'thumbnail': info.get('videoStillURL') or info.get('thumbnailURL'),
+            'duration': info['duration'],
+        }
youtube_dl/extractor/dotsub.py
@@ -1,34 +1,35 @@
+from __future__ import unicode_literals
+
 import re
-import json
 import time
 
 from .common import InfoExtractor
 
 
 class DotsubIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)'
+    _VALID_URL = r'http://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
     _TEST = {
-        u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
+        'url': 'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
-        u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv',
+        'md5': '0914d4d69605090f623b7ac329fea66e',
-        u'md5': u'0914d4d69605090f623b7ac329fea66e',
+        'info_dict': {
-        u'info_dict': {
+            'id': 'aed3b8b2-1889-4df5-ae63-ad85f5572f27',
-            u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary",
+            'ext': 'flv',
-            u"uploader": u"4v4l0n42",
+            'title': 'Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary',
-            u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
+            'uploader': '4v4l0n42',
-            u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
+            'description': 'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
-            u'upload_date': u'20101213',
+            'thumbnail': 'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
+            'upload_date': '20101213',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
-        info_url = "https://dotsub.com/api/media/%s/metadata" %(video_id)
+        info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
-        webpage = self._download_webpage(info_url, video_id)
+        info = self._download_json(info_url, video_id)
-        info = json.loads(webpage)
         date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
 
-        return [{
+        return {
             'id': video_id,
             'url': info['mediaURI'],
             'ext': 'flv',
@@ -37,5 +38,5 @@ class DotsubIE(InfoExtractor):
             'description': info['description'],
             'uploader': info['user'],
             'view_count': info['numberOfViews'],
-            'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
+            'upload_date': '%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
-        }]
+        }
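The dotsub hunk above is one instance of a refactor that recurs throughout this compare: a _download_webpage call followed by a manual json.loads collapses into a single _download_json call. A minimal sketch of the pattern, with a hypothetical fetch standing in for the actual HTTP download:

    import json

    def fetch(url):
        # hypothetical stand-in for InfoExtractor._download_webpage
        return '{"mediaURI": "http://example.com/video.flv", "user": "someone"}'

    # Before: download, then parse by hand.
    info_old = json.loads(fetch('https://dotsub.com/api/media/some-id/metadata'))

    # After: the moral equivalent of _download_json -- one call that fetches
    # and parses, giving the framework a single place to report JSON errors.
    def download_json(url):
        return json.loads(fetch(url))

    info_new = download_json('https://dotsub.com/api/media/some-id/metadata')
    assert info_old == info_new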
youtube_dl/extractor/dreisat.py
@@ -4,18 +4,17 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     unified_strdate,
 )
 
 
 class DreiSatIE(InfoExtractor):
     IE_NAME = '3sat'
-    _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/index\.php\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
+    _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
     _TEST = {
         u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983",
-        u'file': u'36983.webm',
+        u'file': u'36983.mp4',
-        u'md5': u'57c97d0469d71cf874f6815aa2b7c944',
+        u'md5': u'9dcfe344732808dbfcc901537973c922',
         u'info_dict': {
             u"title": u"Kaffeeland Schweiz",
             u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...",
@@ -52,18 +51,12 @@ class DreiSatIE(InfoExtractor):
                 'width': int(fe.find('./width').text),
                 'height': int(fe.find('./height').text),
                 'url': fe.find('./url').text,
-                'ext': determine_ext(fe.find('./url').text),
                 'filesize': int(fe.find('./filesize').text),
                 'video_bitrate': int(fe.find('./videoBitrate').text),
-                '3sat_qualityname': fe.find('./quality').text,
             } for fe in format_els
             if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
 
-        def _sortkey(format):
+        self._sort_formats(formats)
-            qidx = ['low', 'med', 'high', 'veryhigh'].index(format['3sat_qualityname'])
-            prefer_http = 1 if 'rtmp' in format['url'] else 0
-            return (qidx, prefer_http, format['video_bitrate'])
-        formats.sort(key=_sortkey)
 
         return {
             '_type': 'video',
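The dreisat hunk swaps a hand-written _sortkey (quality name, then protocol, then bitrate) for the shared self._sort_formats(formats). A simplified stand-in for what the shared helper does with format dicts like the ones built above -- worst format first, best last, which is the ordering youtube-dl expects (this key function is an approximation, not the real implementation):

    formats = [
        {'url': 'rtmp://example.com/low', 'height': 270, 'video_bitrate': 300},
        {'url': 'http://example.com/high', 'height': 720, 'video_bitrate': 1400},
        {'url': 'http://example.com/med', 'height': 360, 'video_bitrate': 700},
    ]

    # Approximation of InfoExtractor._sort_formats: sort ascending by
    # resolution and bitrate so the preferred format ends up last.
    formats.sort(key=lambda f: (f.get('height') or 0, f.get('video_bitrate') or 0))

    assert formats[-1]['url'] == 'http://example.com/high'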
youtube_dl/extractor/dropbox.py (new file)
@@ -0,0 +1,32 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import os.path
+import re
+
+from .common import InfoExtractor
+
+
+class DropboxIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
+    _TEST = {
+        'url': 'https://www.dropbox.com/s/0qr9sai2veej4f8/THE_DOCTOR_GAMES.mp4',
+        'md5': '8ae17c51172fb7f93bdd6a214cc8c896',
+        'info_dict': {
+            'id': '0qr9sai2veej4f8',
+            'ext': 'mp4',
+            'title': 'THE_DOCTOR_GAMES'
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        title = os.path.splitext(mobj.group('title'))[0]
+        video_url = url + '?dl=1'
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+        }
youtube_dl/extractor/elpais.py (new file)
@@ -0,0 +1,58 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import unified_strdate
+
+
+class ElPaisIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])'
+    IE_DESC = 'El País'
+
+    _TEST = {
+        'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html',
+        'md5': '98406f301f19562170ec071b83433d55',
+        'info_dict': {
+            'id': 'tiempo-nuevo-recetas-viejas',
+            'ext': 'mp4',
+            'title': 'Tiempo nuevo, recetas viejas',
+            'description': 'De lunes a viernes, a partir de las ocho de la mañana, Iñaki Gabilondo nos cuenta su visión de la actualidad nacional e internacional.',
+            'upload_date': '20140206',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        prefix = self._html_search_regex(
+            r'var url_cache = "([^"]+)";', webpage, 'URL prefix')
+        video_suffix = self._search_regex(
+            r"URLMediaFile = url_cache \+ '([^']+)'", webpage, 'video URL')
+        video_url = prefix + video_suffix
+        thumbnail_suffix = self._search_regex(
+            r"URLMediaStill = url_cache \+ '([^']+)'", webpage, 'thumbnail URL',
+            fatal=False)
+        thumbnail = (
+            None if thumbnail_suffix is None
+            else prefix + thumbnail_suffix)
+        title = self._html_search_regex(
+            '<h2 class="entry-header entry-title.*?>(.*?)</h2>',
+            webpage, 'title')
+        date_str = self._search_regex(
+            r'<p class="date-header date-int updated"\s+title="([^"]+)">',
+            webpage, 'upload date', fatal=False)
+        upload_date = (None if date_str is None else unified_strdate(date_str))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': self._og_search_description(webpage),
+            'thumbnail': thumbnail,
+            'upload_date': upload_date,
+        }
youtube_dl/extractor/escapist.py
@@ -1,9 +1,9 @@
-import json
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_str,
     compat_urllib_parse,
 
     ExtractorError,
@@ -11,70 +11,68 @@ from ..utils import (
 
 
 class EscapistIE(InfoExtractor):
-    _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
+    _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<id>[0-9]+)-'
     _TEST = {
-        u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
+        'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
-        u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4',
+        'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
-        u'md5': u'ab3a706c681efca53f0a35f1415cf0d1',
+        'info_dict': {
-        u'info_dict': {
+            'id': '6618',
-            u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
+            'ext': 'mp4',
-            u"uploader": u"the-escapist-presents",
+            'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
-            u"title": u"Breaking Down Baldur's Gate"
+            'uploader': 'the-escapist-presents',
+            'title': "Breaking Down Baldur's Gate",
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         showName = mobj.group('showname')
-        videoId = mobj.group('episode')
+        video_id = mobj.group('id')
 
-        self.report_extraction(videoId)
+        self.report_extraction(video_id)
-        webpage = self._download_webpage(url, videoId)
+        webpage = self._download_webpage(url, video_id)
 
         videoDesc = self._html_search_regex(
             r'<meta name="description" content="([^"]*)"',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
 
         playerUrl = self._og_search_video_url(webpage, name=u'player URL')
 
         title = self._html_search_regex(
             r'<meta name="title" content="([^"]*)"',
-            webpage, u'title').split(' : ')[-1]
+            webpage, 'title').split(' : ')[-1]
 
-        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config URL')
+        configUrl = self._search_regex('config=(.*)$', playerUrl, 'config URL')
         configUrl = compat_urllib_parse.unquote(configUrl)
 
         formats = []
 
-        def _add_format(name, cfgurl):
+        def _add_format(name, cfgurl, quality):
-            configJSON = self._download_webpage(
+            config = self._download_json(
-                cfgurl, videoId,
+                cfgurl, video_id,
-                u'Downloading ' + name + ' configuration',
+                'Downloading ' + name + ' configuration',
-                u'Unable to download ' + name + ' configuration')
+                'Unable to download ' + name + ' configuration',
+                transform_source=lambda s: s.replace("'", '"'))
 
-            # Technically, it's JavaScript, not JSON
-            configJSON = configJSON.replace("'", '"')
 
-            try:
-                config = json.loads(configJSON)
-            except (ValueError,) as err:
-                raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
             playlist = config['playlist']
             formats.append({
                 'url': playlist[1]['url'],
                 'format_id': name,
+                'quality': quality,
             })
 
-        _add_format(u'normal', configUrl)
+        _add_format('normal', configUrl, quality=0)
         hq_url = (configUrl +
                   ('&hq=1' if '?' in configUrl else configUrl + '?hq=1'))
         try:
-            _add_format(u'hq', hq_url)
+            _add_format('hq', hq_url, quality=1)
         except ExtractorError:
             pass # That's fine, we'll just use normal quality
 
+        self._sort_formats(formats)
+
         return {
-            'id': videoId,
+            'id': video_id,
             'formats': formats,
             'uploader': showName,
             'title': title,
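The escapist hunk retires the quote-swapping and try/except json.loads boilerplate in favour of _download_json's transform_source hook, which rewrites the body before parsing. What the hook does to the single-quoted, JavaScript-flavoured player config (the config string here is a made-up example):

    import json

    raw_config = "{'playlist': [{'url': 'http://example.com/intro'}, {'url': 'http://example.com/video.mp4'}]}"

    # Same lambda as in the new code: turn single quotes into double quotes
    # so the config becomes valid JSON before json.loads runs.
    transform_source = lambda s: s.replace("'", '"')
    config = json.loads(transform_source(raw_config))

    assert config['playlist'][1]['url'] == 'http://example.com/video.mp4'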
youtube_dl/extractor/everyonesmixtape.py (new file)
@@ -0,0 +1,69 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+    ExtractorError,
+)
+
+
+class EveryonesMixtapeIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
+
+    _TEST = {
+        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
+        'file': '5bfseWNmlds.mp4',
+        "info_dict": {
+            "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
+            "uploader": "FKR.TV",
+            "uploader_id": "frenchkissrecords",
+            "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
+            "upload_date": "20081015"
+        },
+        'params': {
+            'skip_download': True, # This is simply YouTube
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+
+        pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
+        pllist_req = compat_urllib_request.Request(pllist_url)
+        pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
+
+        playlist_list = self._download_json(
+            pllist_req, playlist_id, note='Downloading playlist metadata')
+        try:
+            playlist_no = next(playlist['id']
+                               for playlist in playlist_list
+                               if playlist['code'] == playlist_id)
+        except StopIteration:
+            raise ExtractorError('Playlist id not found')
+
+        pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
+        pl_req = compat_urllib_request.Request(pl_url)
+        pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
+        playlist = self._download_json(
+            pl_req, playlist_id, note='Downloading playlist info')
+
+        entries = [{
+            '_type': 'url',
+            'url': t['url'],
+            'title': t['title'],
+        } for t in playlist['tracks']]
+
+        if mobj.group('songnr'):
+            songnr = int(mobj.group('songnr')) - 1
+            return entries[songnr]
+
+        playlist_title = playlist['mixData']['name']
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': playlist_title,
+            'entries': entries,
+        }
youtube_dl/extractor/exfm.py
@@ -1,56 +1,58 @@
+from __future__ import unicode_literals
+
 import re
-import json
 
 from .common import InfoExtractor
 
 
 class ExfmIE(InfoExtractor):
-    IE_NAME = u'exfm'
+    IE_NAME = 'exfm'
-    IE_DESC = u'ex.fm'
+    IE_DESC = 'ex.fm'
-    _VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)'
+    _VALID_URL = r'http://(?:www\.)?ex\.fm/song/(?P<id>[^/]+)'
-    _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
+    _SOUNDCLOUD_URL = r'http://(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
     _TESTS = [
         {
-            u'url': u'http://ex.fm/song/eh359',
+            'url': 'http://ex.fm/song/eh359',
-            u'file': u'44216187.mp3',
+            'md5': 'e45513df5631e6d760970b14cc0c11e7',
-            u'md5': u'e45513df5631e6d760970b14cc0c11e7',
+            'info_dict': {
-            u'info_dict': {
+                'id': '44216187',
-                u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
+                'ext': 'mp3',
-                u"uploader": u"deadjournalist",
+                'title': 'Test House "Love Is Not Enough" (Extended Mix) DeadJournalist Exclusive',
-                u'upload_date': u'20120424',
+                'uploader': 'deadjournalist',
-                u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
+                'upload_date': '20120424',
+                'description': 'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
             },
-            u'note': u'Soundcloud song',
+            'note': 'Soundcloud song',
-            u'skip': u'The site is down too often',
+            'skip': 'The site is down too often',
         },
         {
-            u'url': u'http://ex.fm/song/wddt8',
+            'url': 'http://ex.fm/song/wddt8',
-            u'file': u'wddt8.mp3',
+            'md5': '966bd70741ac5b8570d8e45bfaed3643',
-            u'md5': u'966bd70741ac5b8570d8e45bfaed3643',
+            'info_dict': {
-            u'info_dict': {
+                'id': 'wddt8',
-                u'title': u'Safe and Sound',
+                'ext': 'mp3',
-                u'uploader': u'Capital Cities',
+                'title': 'Safe and Sound',
+                'uploader': 'Capital Cities',
            },
-            u'skip': u'The site is down too often',
+            'skip': 'The site is down too often',
        },
    ]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        song_id = mobj.group(1)
+        song_id = mobj.group('id')
-        info_url = "http://ex.fm/api/v3/song/%s" %(song_id)
+        info_url = "http://ex.fm/api/v3/song/%s" % song_id
-        webpage = self._download_webpage(info_url, song_id)
+        info = self._download_json(info_url, song_id)['song']
-        info = json.loads(webpage)
+        song_url = info['url']
-        song_url = info['song']['url']
         if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
             self.to_screen('Soundcloud song detected')
             return self.url_result(song_url.replace('/stream', ''), 'Soundcloud')
-        return [{
+        return {
             'id': song_id,
             'url': song_url,
             'ext': 'mp3',
-            'title': info['song']['title'],
+            'title': info['title'],
-            'thumbnail': info['song']['image']['large'],
+            'thumbnail': info['image']['large'],
-            'uploader': info['song']['artist'],
+            'uploader': info['artist'],
-            'view_count': info['song']['loved_count'],
+            'view_count': info['loved_count'],
-        }]
+        }
youtube_dl/extractor/facebook.py
@@ -17,7 +17,12 @@ from ..utils import (
 class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""
 
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:[^#?]*#!/)?(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+    _VALID_URL = r'''(?x)
+        (?:https?://)?(?:\w+\.)?facebook\.com/
+        (?:[^#?]*\#!/)?
+        (?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
+        (?:v|video_id)=(?P<id>[0-9]+)
+        (?:.*)'''
     _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
     _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
     _NETRC_MACHINE = 'facebook'
@@ -90,7 +95,7 @@ class FacebookIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
             raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('ID')
+        video_id = mobj.group('id')
 
         url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
         webpage = self._download_webpage(url, video_id)
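The rewritten facebook pattern does more than reformat: besides video.php?v= URLs, it now also accepts /video/embed?video_id= URLs. A quick check of both shapes against the pattern exactly as written above (the numeric id is made up):

    import re

    VALID_URL = r'''(?x)
        (?:https?://)?(?:\w+\.)?facebook\.com/
        (?:[^#?]*\#!/)?
        (?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
        (?:v|video_id)=(?P<id>[0-9]+)
        (?:.*)'''

    for url in (
        'https://www.facebook.com/video/video.php?v=10152183172406108',
        'https://www.facebook.com/video/embed?video_id=10152183172406108',
    ):
        mobj = re.match(VALID_URL, url)
        assert mobj is not None and mobj.group('id') == '10152183172406108'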
youtube_dl/extractor/firstpost.py (new file)
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class FirstpostIE(InfoExtractor):
+    IE_NAME = 'Firstpost.com'
+    _VALID_URL = r'http://(?:www\.)?firstpost\.com/[^/]+/.*-(?P<id>[0-9]+)\.html'
+
+    _TEST = {
+        'url': 'http://www.firstpost.com/india/india-to-launch-indigenous-aircraft-carrier-monday-1025403.html',
+        'md5': 'ee9114957692f01fb1263ed87039112a',
+        'info_dict': {
+            'id': '1025403',
+            'ext': 'mp4',
+            'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
+            'description': 'Its flight deck is over twice the size of a football field, its power unit can light up the entire Kochi city and the cabling is enough to cover the distance between here to Delhi.',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        video_url = self._html_search_regex(
+            r'<div.*?name="div_video".*?flashvars="([^"]+)">',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
youtube_dl/extractor/firsttv.py (new file)
@@ -0,0 +1,60 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class FirstTVIE(InfoExtractor):
+    IE_NAME = 'firsttv'
+    IE_DESC = 'Видеоархив - Первый канал'
+    _VALID_URL = r'http://(?:www\.)?1tv\.ru/videoarchive/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.1tv.ru/videoarchive/73390',
+        'md5': '3de6390cf0cca4a5eae1d1d83895e5ad',
+        'info_dict': {
+            'id': '73390',
+            'ext': 'mp4',
+            'title': 'Олимпийские канатные дороги',
+            'description': 'md5:cc730d2bf4215463e37fff6a1e277b13',
+            'thumbnail': 'http://img1.1tv.ru/imgsize640x360/PR20140210114657.JPG',
+            'duration': 149,
+        },
+        'skip': 'Only works from Russia',
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id, 'Downloading page')
+
+        video_url = self._html_search_regex(
+            r'''(?s)jwplayer\('flashvideoportal_1'\)\.setup\({.*?'file': '([^']+)'.*?}\);''', webpage, 'video URL')
+
+        title = self._html_search_regex(
+            r'<div class="tv_translation">\s*<h1><a href="[^"]+">([^<]*)</a>', webpage, 'title')
+        description = self._html_search_regex(
+            r'<div class="descr">\s*<div> </div>\s*<p>([^<]*)</p></div>', webpage, 'description', fatal=False)
+
+        thumbnail = self._og_search_thumbnail(webpage)
+        duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
+
+        like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
+                                             webpage, 'like count', fatal=False)
+        dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
+                                                webpage, 'dislike count', fatal=False)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'thumbnail': thumbnail,
+            'title': title,
+            'description': description,
+            'duration': int_or_none(duration),
+            'like_count': int_or_none(like_count),
+            'dislike_count': int_or_none(dislike_count),
+        }
youtube_dl/extractor/flickr.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -11,13 +13,13 @@ from .common import InfoExtractor
     """Information Extractor for Flickr videos"""
     _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
     _TEST = {
-        u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
+        'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
-        u'file': u'5645318632.mp4',
+        'file': '5645318632.mp4',
-        u'md5': u'6fdc01adbc89d72fc9c4f15b4a4ba87b',
+        'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
-        u'info_dict': {
+        'info_dict': {
-            u"description": u"Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
+            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
-            u"uploader_id": u"forestwander-nature-pictures",
+            "uploader_id": "forestwander-nature-pictures",
-            u"title": u"Dark Hollow Waterfalls"
+            "title": "Dark Hollow Waterfalls"
         }
     }
 
@@ -29,13 +31,13 @@ class FlickrIE(InfoExtractor):
         webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
 
-        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')
+        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, 'secret')
 
         first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
 
         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
-            first_xml, u'node_id')
+            first_xml, 'node_id')
 
         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
@@ -44,7 +46,7 @@ class FlickrIE(InfoExtractor):
 
         mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
         if mobj is None:
-            raise ExtractorError(u'Unable to extract video url')
+            raise ExtractorError('Unable to extract video url')
         video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
 
         return [{
youtube_dl/extractor/fourtube.py (new file)
@@ -0,0 +1,95 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+    unified_strdate,
+    str_to_int,
+    parse_duration,
+)
+from youtube_dl.utils import clean_html
+
+
+class FourTubeIE(InfoExtractor):
+    IE_NAME = '4tube'
+    _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
+        'md5': '6516c8ac63b03de06bc8eac14362db4f',
+        'info_dict': {
+            'id': '209733',
+            'ext': 'mp4',
+            'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
+            'uploader': 'WCP Club',
+            'uploader_id': 'wcp-club',
+            'upload_date': '20131031',
+            'duration': 583,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        webpage_url = 'http://www.4tube.com/videos/' + video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        self.report_extraction(video_id)
+
+        playlist_json = self._html_search_regex(r'var playerConfigPlaylist\s+=\s+([^;]+)', webpage, 'Playlist')
+        media_id = self._search_regex(r'idMedia:\s*(\d+)', playlist_json, 'Media Id')
+        sources = self._search_regex(r'sources:\s*\[([^\]]*)\]', playlist_json, 'Sources').split(',')
+        title = self._search_regex(r'title:\s*"([^"]*)', playlist_json, 'Title')
+        thumbnail_url = self._search_regex(r'image:\s*"([^"]*)', playlist_json, 'Thumbnail', fatal=False)
+
+        uploader_str = self._search_regex(r'<span>Uploaded by</span>(.*?)<span>', webpage, 'uploader', fatal=False)
+        mobj = re.search(r'<a href="/sites/(?P<id>[^"]+)"><strong>(?P<name>[^<]+)</strong></a>', uploader_str)
+        (uploader, uploader_id) = (mobj.group('name'), mobj.group('id')) if mobj else (clean_html(uploader_str), None)
+
+        upload_date = None
+        view_count = None
+        duration = None
+        description = self._html_search_meta('description', webpage, 'description')
+        if description:
+            upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
+                                             fatal=False)
+            if upload_date:
+                upload_date = unified_strdate(upload_date)
+            view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
+            if view_count:
+                view_count = str_to_int(view_count)
+            duration = parse_duration(self._search_regex(r'Length: (\d+m\d+s)', description, 'duration', fatal=False))
+
+        token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
+        headers = {
+            b'Content-Type': b'application/x-www-form-urlencoded',
+            b'Origin': b'http://www.4tube.com',
+        }
+        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
+        tokens = self._download_json(token_req, video_id)
+
+        formats = [{
+            'url': tokens[format]['token'],
+            'format_id': format + 'p',
+            'resolution': format + 'p',
+            'quality': int(format),
+        } for format in sources]
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': thumbnail_url,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'upload_date': upload_date,
+            'view_count': view_count,
+            'duration': duration,
+            'age_limit': 18,
+            'webpage_url': webpage_url,
+        }
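One detail of the new 4tube extractor: the token endpoint is requested with an explicit body (b'{}'), which makes Request issue a POST, and the header names and values are byte strings so the same literals behave consistently on Python 2 and 3. A sketch of just the request construction, reusing the endpoint shape from the code above (the ids are hypothetical and nothing is actually sent):

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    media_id = '1234'  # hypothetical values
    sources = ['720', '480', '360']

    token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
        media_id, '+'.join(sources))
    headers = {
        b'Content-Type': b'application/x-www-form-urlencoded',
        b'Origin': b'http://www.4tube.com',
    }
    # A non-None body turns the request into a POST.
    token_req = Request(token_url, b'{}', headers)
    assert token_req.get_method() == 'POST'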
youtube_dl/extractor/franceinter.py (new file)
@@ -0,0 +1,38 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class FranceInterIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
+    _TEST = {
+        'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
+        'file': '793962.mp3',
+        'md5': '4764932e466e6f6c79c317d2e74f6884',
+        "info_dict": {
+            "title": "L’Histoire dans les jeux vidéo",
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(
+            r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
+        path = self._search_regex(
+            r'&urlAOD=(.*?)&startTime', webpage, 'video url')
+        video_url = 'http://www.franceinter.fr/' + path
+
+        return {
+            'id': video_id,
+            'formats': [{
+                'url': video_url,
+                'vcodec': 'none',
+            }],
+            'title': title,
+        }
youtube_dl/extractor/francetv.py
@@ -1,4 +1,7 @@
 # encoding: utf-8
+
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -30,7 +33,7 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
 
 
 class PluzzIE(FranceTVBaseInfoExtractor):
-    IE_NAME = u'pluzz.francetv.fr'
+    IE_NAME = 'pluzz.francetv.fr'
     _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'
 
     # Can't use tests, videos expire in 7 days
@@ -44,17 +47,17 @@ class PluzzIE(FranceTVBaseInfoExtractor):
 
 
 class FranceTvInfoIE(FranceTVBaseInfoExtractor):
-    IE_NAME = u'francetvinfo.fr'
+    IE_NAME = 'francetvinfo.fr'
     _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+)\.html'
 
     _TEST = {
-        u'url': u'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
+        'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
-        u'file': u'84981923.mp4',
+        'file': '84981923.mp4',
-        u'info_dict': {
+        'info_dict': {
-            u'title': u'Soir 3',
+            'title': 'Soir 3',
         },
-        u'params': {
+        'params': {
-            u'skip_download': True,
+            'skip_download': True,
        },
    }
 
@@ -62,13 +65,13 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         page_title = mobj.group('title')
         webpage = self._download_webpage(url, page_title)
-        video_id = self._search_regex(r'id-video=(\d+?)"', webpage, u'video id')
+        video_id = self._search_regex(r'id-video=(\d+?)[@"]', webpage, 'video id')
         return self._extract_video(video_id)
 
 
 class FranceTVIE(FranceTVBaseInfoExtractor):
-    IE_NAME = u'francetv'
+    IE_NAME = 'francetv'
-    IE_DESC = u'France 2, 3, 4, 5 and Ô'
+    IE_DESC = 'France 2, 3, 4, 5 and Ô'
     _VALID_URL = r'''(?x)https?://www\.france[2345o]\.fr/
         (?:
             emissions/.*?/(videos|emissions)/(?P<id>[^/?]+)
@@ -78,73 +81,73 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
     _TESTS = [
         # france2
         {
-            u'url': u'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
+            'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
-            u'file': u'75540104.mp4',
+            'file': '75540104.mp4',
-            u'info_dict': {
+            'info_dict': {
-                u'title': u'13h15, le samedi...',
+                'title': '13h15, le samedi...',
-                u'description': u'md5:2e5b58ba7a2d3692b35c792be081a03d',
+                'description': 'md5:2e5b58ba7a2d3692b35c792be081a03d',
             },
-            u'params': {
+            'params': {
                 # m3u8 download
-                u'skip_download': True,
+                'skip_download': True,
            },
        },
         # france3
        {
-            u'url': u'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
+            'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
-            u'info_dict': {
+            'info_dict': {
-                u'id': u'000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
+                'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
-                u'ext': u'flv',
+                'ext': 'flv',
-                u'title': u'Le scandale du prix des médicaments',
+                'title': 'Le scandale du prix des médicaments',
-                u'description': u'md5:1384089fbee2f04fc6c9de025ee2e9ce',
+                'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
            },
-            u'params': {
+            'params': {
                 # rtmp download
-                u'skip_download': True,
+                'skip_download': True,
            },
        },
         # france4
        {
-            u'url': u'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
+            'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
-            u'info_dict': {
+            'info_dict': {
-                u'id': u'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
+                'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
-                u'ext': u'flv',
+                'ext': 'flv',
-                u'title': u'Hero Corp Making of - Extrait 1',
+                'title': 'Hero Corp Making of - Extrait 1',
-                u'description': u'md5:c87d54871b1790679aec1197e73d650a',
+                'description': 'md5:c87d54871b1790679aec1197e73d650a',
            },
-            u'params': {
+            'params': {
                 # rtmp download
-                u'skip_download': True,
+                'skip_download': True,
            },
        },
         # france5
        {
-            u'url': u'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
+            'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
-            u'info_dict': {
+            'info_dict': {
-                u'id': u'92837968',
+                'id': '92837968',
-                u'ext': u'mp4',
+                'ext': 'mp4',
-                u'title': u'C à dire ?!',
+                'title': 'C à dire ?!',
-                u'description': u'md5:fb1db1cbad784dcce7c7a7bd177c8e2f',
+                'description': 'md5:fb1db1cbad784dcce7c7a7bd177c8e2f',
            },
-            u'params': {
+            'params': {
                 # m3u8 download
-                u'skip_download': True,
+                'skip_download': True,
            },
        },
         # franceo
        {
-            u'url': u'http://www.franceo.fr/jt/info-afrique/04-12-2013',
+            'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
-            u'info_dict': {
+            'info_dict': {
-                u'id': u'92327925',
+                'id': '92327925',
-                u'ext': u'mp4',
+                'ext': 'mp4',
-                u'title': u'Infô-Afrique',
+                'title': 'Infô-Afrique',
-                u'description': u'md5:ebf346da789428841bee0fd2a935ea55',
+                'description': 'md5:ebf346da789428841bee0fd2a935ea55',
            },
-            u'params': {
+            'params': {
                 # m3u8 download
-                u'skip_download': True,
+                'skip_download': True,
            },
-            u'skip': u'The id changes frequently',
+            'skip': 'The id changes frequently',
        },
    ]
 
@@ -160,27 +163,28 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
                 '\.fr/\?id-video=([^"/&]+)'),
                 (r'<a class="video" id="ftv_player_(.+?)"'),
             ]
-            video_id = self._html_search_regex(id_res, webpage, u'video ID')
+            video_id = self._html_search_regex(id_res, webpage, 'video ID')
         else:
             video_id = mobj.group('id')
         return self._extract_video(video_id)
 
 
 class GenerationQuoiIE(InfoExtractor):
-    IE_NAME = u'france2.fr:generation-quoi'
+    IE_NAME = 'france2.fr:generation-quoi'
     _VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<name>.*)(\?|$)'
 
     _TEST = {
-        u'url': u'http://generation-quoi.france2.fr/portrait/garde-a-vous',
+        'url': 'http://generation-quoi.france2.fr/portrait/garde-a-vous',
-        u'file': u'k7FJX8VBcvvLmX4wA5Q.mp4',
+        'file': 'k7FJX8VBcvvLmX4wA5Q.mp4',
-        u'info_dict': {
+        'info_dict': {
-            u'title': u'Génération Quoi - Garde à Vous',
+            'title': 'Génération Quoi - Garde à Vous',
-            u'uploader': u'Génération Quoi',
+            'uploader': 'Génération Quoi',
        },
-        u'params': {
+        'params': {
             # It uses Dailymotion
-            u'skip_download': True,
+            'skip_download': True,
        },
+        'skip': 'Only available from France',
    }
 
     def _real_extract(self, url):
@@ -191,3 +195,29 @@ class GenerationQuoiIE(InfoExtractor):
         info = json.loads(info_json)
         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
             ie='Dailymotion')
+
+
+class CultureboxIE(FranceTVBaseInfoExtractor):
+    IE_NAME = 'culturebox.francetvinfo.fr'
+    _VALID_URL = r'https?://culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
+
+    _TEST = {
+        'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
+        'info_dict': {
+            'id': 'EV_6785',
+            'ext': 'mp4',
+            'title': 'Einstein on the beach au Théâtre du Châtelet',
+            'description': 'md5:9ce2888b1efefc617b5e58b3f6200eeb',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, 'video id')
+        return self._extract_video(video_id)
youtube_dl/extractor/freesound.py
@@ -1,18 +1,21 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 
 class FreesoundIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
     _TEST = {
-        u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/',
+        'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
-        u'file': u'194503.mp3',
+        'md5': '12280ceb42c81f19a515c745eae07650',
-        u'md5': u'12280ceb42c81f19a515c745eae07650',
+        'info_dict': {
-        u'info_dict': {
+            'id': '194503',
-            u"title": u"gulls in the city.wav",
+            'ext': 'mp3',
-            u"uploader" : u"miklovan",
+            'title': 'gulls in the city.wav',
-            u'description': u'the sounds of seagulls in the city',
+            'uploader': 'miklovan',
+            'description': 'the sounds of seagulls in the city',
        }
    }
 
@@ -20,17 +23,17 @@ class FreesoundIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         music_id = mobj.group('id')
         webpage = self._download_webpage(url, music_id)
-        title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
+        title = self._html_search_regex(
+            r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
             webpage, 'music title', flags=re.DOTALL)
-        music_url = self._og_search_property('audio', webpage, 'music url')
+        description = self._html_search_regex(
-        description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>',
+            r'<div id="sound_description">(.*?)</div>', webpage, 'description',
-            webpage, 'description', fatal=False, flags=re.DOTALL)
+            fatal=False, flags=re.DOTALL)
 
-        return [{
+        return {
             'id': music_id,
             'title': title,
-            'url': music_url,
+            'url': self._og_search_property('audio', webpage, 'music url'),
             'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
-            'ext': determine_ext(music_url),
             'description': description,
-        }]
+        }
youtube_dl/extractor/freespeech.py (new file)
@@ -0,0 +1,37 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+
+
+class FreespeechIE(InfoExtractor):
+    IE_NAME = 'freespeech.org'
+    _VALID_URL = r'https://www\.freespeech\.org/video/(?P<title>.+)'
+    _TEST = {
+        'add_ie': ['Youtube'],
+        'url': 'https://www.freespeech.org/video/obama-romney-campaign-colorado-ahead-debate-0',
+        'info_dict': {
+            'id': 'poKsVCZ64uU',
+            'ext': 'mp4',
+            'title': 'Obama, Romney Campaign in Colorado Ahead of Debate',
+            'description': 'Obama, Romney Campaign in Colorado Ahead of Debate',
+            'uploader': 'freespeechtv',
+            'uploader_id': 'freespeechtv',
+            'upload_date': '20121002',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        title = mobj.group('title')
+        webpage = self._download_webpage(url, title)
+        info_json = self._search_regex(r'jQuery.extend\(Drupal.settings, ({.*?})\);', webpage, 'info')
+        info = json.loads(info_json)
+
+        return {
+            '_type': 'url',
+            'url': info['jw_player']['basic_video_node_player']['file'],
+            'ie_key': 'Youtube',
+        }
youtube_dl/extractor/funnyordie.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -6,13 +8,16 @@ from .common import InfoExtractor
 class FunnyOrDieIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
     _TEST = {
-        u'url': u'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
+        'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
-        u'file': u'0732f586d7.mp4',
+        'file': '0732f586d7.mp4',
-        u'md5': u'f647e9e90064b53b6e046e75d0241fbd',
+        'md5': 'f647e9e90064b53b6e046e75d0241fbd',
-        u'info_dict': {
+        'info_dict': {
-            u"description": u"Lyrics changed to match the video. Spoken cameo by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a concept by Dustin McLean (DustFilms.com). Performed, edited, and written by David A. Scott.",
+            'description': ('Lyrics changed to match the video. Spoken cameo '
-            u"title": u"Heart-Shaped Box: Literal Video Version"
+                'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a '
-        }
+                'concept by Dustin McLean (DustFilms.com). Performed, edited, '
+                'and written by David A. Scott.'),
+            'title': 'Heart-Shaped Box: Literal Video Version',
+        },
     }
 
     def _real_extract(self, url):
@@ -23,13 +28,12 @@ class FunnyOrDieIE(InfoExtractor):
 
         video_url = self._search_regex(
             [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''],
-            webpage, u'video URL', flags=re.DOTALL)
+            webpage, 'video URL', flags=re.DOTALL)
 
-        info = {
+        return {
             'id': video_id,
             'url': video_url,
             'ext': 'mp4',
             'title': self._og_search_title(webpage),
             'description': self._og_search_description(webpage),
         }
-        return [info]
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -13,12 +15,12 @@ from ..utils import (
 class GameSpotIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
     _TEST = {
-        u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
-        u"file": u"gs-2300-6410818.mp4",
-        u"md5": u"b2a30deaa8654fcccd43713a6b6a4825",
-        u"info_dict": {
-            u"title": u"Arma 3 - Community Guide: SITREP I",
-            u'description': u'Check out this video where some of the basics of Arma 3 is explained.',
+        "url": "http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
+        "file": "gs-2300-6410818.mp4",
+        "md5": "b2a30deaa8654fcccd43713a6b6a4825",
+        "info_dict": {
+            "title": "Arma 3 - Community Guide: SITREP I",
+            'description': 'Check out this video where some of the basics of Arma 3 is explained.',
         }
     }
@@ -26,7 +28,7 @@ class GameSpotIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         page_id = mobj.group('page_id')
         webpage = self._download_webpage(url, page_id)
-        data_video_json = self._search_regex(r'data-video=\'(.*?)\'', webpage, u'data video')
+        data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video')
         data_video = json.loads(unescapeHTML(data_video_json))
 
         # Transform the manifest url to a link to the mp4 files
@@ -34,7 +36,7 @@ class GameSpotIE(InfoExtractor):
         f4m_url = data_video['videoStreams']['f4m_stream']
         f4m_path = compat_urlparse.urlparse(f4m_url).path
         QUALITIES_RE = r'((,\d+)+,?)'
-        qualities = self._search_regex(QUALITIES_RE, f4m_path, u'qualities').strip(',').split(',')
+        qualities = self._search_regex(QUALITIES_RE, f4m_path, 'qualities').strip(',').split(',')
         http_path = f4m_path[1:].split('/', 1)[1]
         http_template = re.sub(QUALITIES_RE, r'%s', http_path)
         http_template = http_template.replace('.csmil/manifest.f4m', '')
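The manifest transformation is the interesting part: the comma-separated quality run inside the f4m path is swapped for `%s`, and each quality is substituted back in to get a plain mp4 path. A worked sketch with a hypothetical manifest URL (modern-Python urlparse stands in for the project's compat layer):

import re
from urllib.parse import urlparse

f4m_url = 'http://example.com/z/vids/gs-2300-6410818_,480,720,1080,.mp4.csmil/manifest.f4m'
f4m_path = urlparse(f4m_url).path

QUALITIES_RE = r'((,\d+)+,?)'
# ',480,720,1080,' -> ['480', '720', '1080']
qualities = re.search(QUALITIES_RE, f4m_path).group(1).strip(',').split(',')
# Drop the leading slash and the first path segment.
http_path = f4m_path[1:].split('/', 1)[1]
http_template = re.sub(QUALITIES_RE, r'%s', http_path)
http_template = http_template.replace('.csmil/manifest.f4m', '')

for q in qualities:
    print(http_template % q)
# vids/gs-2300-6410818_480.mp4
# vids/gs-2300-6410818_720.mp4
# vids/gs-2300-6410818_1080.mp4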
@@ -1,4 +1,4 @@
-import re
+from __future__ import unicode_literals
 
 from .mtv import MTVServicesInfoExtractor
 
@@ -6,22 +6,14 @@ from .mtv import MTVServicesInfoExtractor
 class GametrailersIE(MTVServicesInfoExtractor):
     _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
     _TEST = {
-        u'url': u'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
-        u'file': u'70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.mp4',
-        u'md5': u'4c8e67681a0ea7ec241e8c09b3ea8cf7',
-        u'info_dict': {
-            u'title': u'E3 2013: Debut Trailer',
-            u'description': u'Faith is back! Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
+        'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
+        'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7',
+        'info_dict': {
+            'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d',
+            'ext': 'mp4',
+            'title': 'E3 2013: Debut Trailer',
+            'description': 'Faith is back! Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
         },
     }
 
     _FEED_URL = 'http://www.gametrailers.com/feeds/mrss'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        mgid = self._search_regex([r'data-video="(?P<mgid>mgid:.*?)"',
-                                   r'data-contentId=\'(?P<mgid>mgid:.*?)\''],
-                                  webpage, u'mgid')
-        return self._get_videos_info(mgid)
@@ -1,9 +1,12 @@
 # encoding: utf-8
 
+from __future__ import unicode_literals
+
 import os
 import re
 
 from .common import InfoExtractor
+from .youtube import YoutubeIE
 from ..utils import (
     compat_urllib_error,
     compat_urllib_parse,
@@ -11,64 +14,90 @@ from ..utils import (
     compat_urlparse,
 
     ExtractorError,
+    HEADRequest,
     smuggle_url,
     unescapeHTML,
+    unified_strdate,
+    url_basename,
 )
 from .brightcove import BrightcoveIE
+from .ooyala import OoyalaIE
 
 
 class GenericIE(InfoExtractor):
-    IE_DESC = u'Generic downloader that works on some sites'
+    IE_DESC = 'Generic downloader that works on some sites'
     _VALID_URL = r'.*'
-    IE_NAME = u'generic'
+    IE_NAME = 'generic'
     _TESTS = [
         {
-            u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
-            u'file': u'13601338388002.mp4',
-            u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',
-            u'info_dict': {
-                u"uploader": u"www.hodiho.fr",
-                u"title": u"R\u00e9gis plante sa Jeep"
-            }
-        },
-        # embedded vimeo video
-        {
-            u'add_ie': ['Vimeo'],
-            u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references',
-            u'file': u'22444065.mp4',
-            u'md5': u'2903896e23df39722c33f015af0666e2',
-            u'info_dict': {
-                u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011',
-                u"uploader_id": u"skillsmatter",
-                u"uploader": u"Skills Matter",
+            'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
+            'file': '13601338388002.mp4',
+            'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd',
+            'info_dict': {
+                'uploader': 'www.hodiho.fr',
+                'title': 'R\u00e9gis plante sa Jeep',
             }
         },
         # bandcamp page with custom domain
         {
-            u'add_ie': ['Bandcamp'],
-            u'url': u'http://bronyrock.com/track/the-pony-mash',
-            u'file': u'3235767654.mp3',
-            u'info_dict': {
-                u'title': u'The Pony Mash',
-                u'uploader': u'M_Pallante',
+            'add_ie': ['Bandcamp'],
+            'url': 'http://bronyrock.com/track/the-pony-mash',
+            'file': '3235767654.mp3',
+            'info_dict': {
+                'title': 'The Pony Mash',
+                'uploader': 'M_Pallante',
             },
-            u'skip': u'There is a limit of 200 free downloads / month for the test song',
+            'skip': 'There is a limit of 200 free downloads / month for the test song',
         },
         # embedded brightcove video
         # it also tests brightcove videos that need to set the 'Referer' in the
         # http requests
         {
-            u'add_ie': ['Brightcove'],
-            u'url': u'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
-            u'info_dict': {
-                u'id': u'2765128793001',
-                u'ext': u'mp4',
-                u'title': u'Le cours de bourse : l’analyse technique',
-                u'description': u'md5:7e9ad046e968cb2d1114004aba466fd9',
-                u'uploader': u'BFM BUSINESS',
+            'add_ie': ['Brightcove'],
+            'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
+            'info_dict': {
+                'id': '2765128793001',
+                'ext': 'mp4',
+                'title': 'Le cours de bourse : l’analyse technique',
+                'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
+                'uploader': 'BFM BUSINESS',
             },
-            u'params': {
-                u'skip_download': True,
+            'params': {
+                'skip_download': True,
             },
         },
+        {
+            # https://github.com/rg3/youtube-dl/issues/2253
+            'url': 'http://bcove.me/i6nfkrc3',
+            'file': '3101154703001.mp4',
+            'md5': '0ba9446db037002366bab3b3eb30c88c',
+            'info_dict': {
+                'title': 'Still no power',
+                'uploader': 'thestar.com',
+                'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
+            },
+            'add_ie': ['Brightcove'],
+        },
+        # Direct link to a video
+        {
+            'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
+            'file': 'trailer.mp4',
+            'md5': '67d406c2bcb6af27fa886f31aa934bbe',
+            'info_dict': {
+                'id': 'trailer',
+                'title': 'trailer',
+                'upload_date': '20100513',
+            }
+        },
+        # ooyala video
+        {
+            'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
+            'file': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ.mp4',
+            'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
+            'info_dict': {
+                'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
+                'ext': 'mp4',
+                'title': '2cc213299525360.mov',  # that's what we get
+            },
+        },
     ]
@@ -76,30 +105,27 @@ class GenericIE(InfoExtractor):
     def report_download_webpage(self, video_id):
         """Report webpage download."""
         if not self._downloader.params.get('test', False):
-            self._downloader.report_warning(u'Falling back on generic information extractor.')
+            self._downloader.report_warning('Falling back on generic information extractor.')
         super(GenericIE, self).report_download_webpage(video_id)
 
     def report_following_redirect(self, new_url):
         """Report information extraction."""
-        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
+        self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
 
-    def _test_redirect(self, url):
+    def _send_head(self, url):
         """Check if it is a redirect, like url shorteners, in case return the new url."""
-        class HeadRequest(compat_urllib_request.Request):
-            def get_method(self):
-                return "HEAD"
-
         class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
             """
             Subclass the HTTPRedirectHandler to make it use our
-            HeadRequest also on the redirected URL
+            HEADRequest also on the redirected URL
             """
             def redirect_request(self, req, fp, code, msg, headers, newurl):
                 if code in (301, 302, 303, 307):
                     newurl = newurl.replace(' ', '%20')
                     newheaders = dict((k,v) for k,v in req.headers.items()
                                       if k.lower() not in ("content-length", "content-type"))
-                    return HeadRequest(newurl,
+                    return HEADRequest(newurl,
                                        headers=newheaders,
                                        origin_req_host=req.get_origin_req_host(),
                                        unverifiable=True)
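The local `HeadRequest` helper is gone because `HEADRequest` now ships in `youtube_dl.utils`; the idea is simply a `Request` subclass that reports HEAD as its method. A minimal Python 3 sketch of the same trick against the stdlib rather than the project's compat layer:

import urllib.request

class HEADRequest(urllib.request.Request):
    # An ordinary Request whose HTTP method is HEAD: headers only, no body.
    def get_method(self):
        return 'HEAD'

response = urllib.request.urlopen(HEADRequest('http://example.com/'))
print(response.geturl())                       # final URL after any redirects
print(response.headers.get('Content-Type'))    # e.g. text/html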
@@ -128,38 +154,68 @@ class GenericIE(InfoExtractor):
                              compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
             opener.add_handler(handler())
 
-        response = opener.open(HeadRequest(url))
+        response = opener.open(HEADRequest(url))
         if response is None:
-            raise ExtractorError(u'Invalid URL protocol')
-        new_url = response.geturl()
-
-        if url == new_url:
-            return False
-
-        self.report_following_redirect(new_url)
-        return new_url
+            raise ExtractorError('Invalid URL protocol')
+        return response
 
     def _real_extract(self, url):
         parsed_url = compat_urlparse.urlparse(url)
         if not parsed_url.scheme:
-            self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
-            return self.url_result('http://' + url)
+            default_search = self._downloader.params.get('default_search')
+            if default_search is None:
+                default_search = 'auto'
+
+            if default_search == 'auto':
+                if '/' in url:
+                    self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
+                    return self.url_result('http://' + url)
+                else:
+                    return self.url_result('ytsearch:' + url)
+            else:
+                assert ':' in default_search
+                return self.url_result(default_search + url)
+
+        video_id = os.path.splitext(url.split('/')[-1])[0]
+
+        self.to_screen('%s: Requesting header' % video_id)
+
         try:
-            new_url = self._test_redirect(url)
-            if new_url:
-                return [self.url_result(new_url)]
+            response = self._send_head(url)
+
+            # Check for redirect
+            new_url = response.geturl()
+            if url != new_url:
+                self.report_following_redirect(new_url)
+                return self.url_result(new_url)
+
+            # Check for direct link to a video
+            content_type = response.headers.get('Content-Type', '')
+            m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
+            if m:
+                upload_date = response.headers.get('Last-Modified')
+                if upload_date:
+                    upload_date = unified_strdate(upload_date)
+                return {
+                    'id': video_id,
+                    'title': os.path.splitext(url_basename(url))[0],
+                    'formats': [{
+                        'format_id': m.group('format_id'),
+                        'url': url,
+                        'vcodec': 'none' if m.group('type') == 'audio' else None
+                    }],
+                    'upload_date': upload_date,
+                }
+
         except compat_urllib_error.HTTPError:
             # This may be a stupid server that doesn't like HEAD, our UA, or so
             pass
 
-        video_id = url.split('/')[-1]
         try:
             webpage = self._download_webpage(url, video_id)
         except ValueError:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
-            raise ExtractorError(u'Failed to download URL: %s' % url)
+            raise ExtractorError('Failed to download URL: %s' % url)
 
         self.report_extraction(video_id)
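The new direct-link branch decides purely from the Content-Type of a HEAD response. A standalone sketch of that probe, assuming a cooperative server and using the Python 3 stdlib in place of the compat handlers:

import os
import re
import urllib.request
from urllib.parse import urlparse, unquote

def probe_direct_video(url):
    # HEAD the URL and inspect Content-Type with the same pattern the
    # extractor applies: audio/*, video/*, or application/ogg count as media.
    req = urllib.request.Request(url, method='HEAD')
    response = urllib.request.urlopen(req)
    content_type = response.headers.get('Content-Type', '')
    m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
    if not m:
        return None
    basename = unquote(os.path.basename(urlparse(url).path))
    return {
        'id': os.path.splitext(basename)[0],
        'format_id': m.group('format_id'),
        'vcodec': 'none' if m.group('type') == 'audio' else None,
    }

print(probe_direct_video('http://media.w3.org/2010/05/sintel/trailer.mp4'))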
@@ -170,30 +226,50 @@ class GenericIE(InfoExtractor):
         # Video Title - Tagline | Site Name
         # and so on and so forth; it's just not practical
         video_title = self._html_search_regex(
-            r'(?s)<title>(.*?)</title>', webpage, u'video title',
-            default=u'video')
+            r'(?s)<title>(.*?)</title>', webpage, 'video title',
+            default='video')
 
         # video uploader is domain name
         video_uploader = self._search_regex(
-            r'^(?:https?://)?([^/]*)/.*', url, u'video uploader')
+            r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
 
         # Look for BrightCove:
-        bc_url = BrightcoveIE._extract_brightcove_url(webpage)
-        if bc_url is not None:
-            self.to_screen(u'Brightcove video detected.')
-            return self.url_result(bc_url, 'Brightcove')
+        bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
+        if bc_urls:
+            self.to_screen('Brightcove video detected.')
+            entries = [{
+                '_type': 'url',
+                'url': smuggle_url(bc_url, {'Referer': url}),
+                'ie_key': 'Brightcove'
+            } for bc_url in bc_urls]
+
+            return {
+                '_type': 'playlist',
+                'title': video_title,
+                'id': video_id,
+                'entries': entries,
+            }
 
-        # Look for embedded Vimeo player
+        # Look for embedded (iframe) Vimeo player
         mobj = re.search(
-            r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage)
+            r'<iframe[^>]+?src="((?:https?:)?//player\.vimeo\.com/video/.+?)"', webpage)
         if mobj:
             player_url = unescapeHTML(mobj.group(1))
             surl = smuggle_url(player_url, {'Referer': url})
             return self.url_result(surl, 'Vimeo')
 
+        # Look for embedded (swf embed) Vimeo player
+        mobj = re.search(
+            r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
+        if mobj:
+            return self.url_result(mobj.group(1), 'Vimeo')
+
         # Look for embedded YouTube player
-        matches = re.findall(
-            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/embed/.+?)\1', webpage)
+        matches = re.findall(r'''(?x)
+            (?:<iframe[^>]+?src=|embedSWF\(\s*)
+            (["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
+            (?:embed|v)/.+?)
+            \1''', webpage)
         if matches:
             urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
                      for tuppl in matches]
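`smuggle_url` is how the generic extractor hands the page URL along as a Referer to the extractor that ends up downloading the video. A sketch of the idea, packing JSON into the URL fragment; the `#__smuggle=` marker here is made up for illustration, not the exact key the utils helper uses:

import json
from urllib.parse import quote, unquote

def smuggle_url(url, data):
    # Append extra request context as a JSON payload in the fragment,
    # where it cannot interfere with the request itself.
    return url + '#__smuggle=' + quote(json.dumps(data))

def unsmuggle_url(smug_url):
    url, _, payload = smug_url.partition('#__smuggle=')
    return url, (json.loads(unquote(payload)) if payload else None)

surl = smuggle_url('https://player.vimeo.com/video/22444065',
                   {'Referer': 'http://example.com/'})
print(unsmuggle_url(surl))
# -> ('https://player.vimeo.com/video/22444065', {'Referer': 'http://example.com/'})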
@@ -223,16 +299,12 @@ class GenericIE(InfoExtractor):
             }
 
         # Look for embedded blip.tv player
-        mobj = re.search(r'<meta\s[^>]*https?://api.blip.tv/\w+/redirect/\w+/(\d+)', webpage)
+        mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/seo/-'+mobj.group(1), 'BlipTV')
-        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*https?://(?:\w+\.)?blip.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', webpage)
+            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
+        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
         if mobj:
-            player_url = 'http://blip.tv/play/%s.x?p=1' % mobj.group(1)
-            player_page = self._download_webpage(player_url, mobj.group(1))
-            blip_video_id = self._search_regex(r'data-episode-id="(\d+)', player_page, u'blip_video_id', fatal=False)
-            if blip_video_id:
-                return self.url_result('http://blip.tv/seo/-'+blip_video_id, 'BlipTV')
+            return self.url_result(mobj.group(1), 'BlipTV')
 
         # Look for Bandcamp pages with custom domain
         mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
@@ -241,14 +313,56 @@ class GenericIE(InfoExtractor):
             # Don't set the extractor because it can be a track url or an album
             return self.url_result(burl)
 
+        # Look for embedded Vevo player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'))
+
+        # Look for Ooyala videos
+        mobj = re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=([^"&]+)', webpage)
+        if mobj is not None:
+            return OoyalaIE._build_url_result(mobj.group(1))
+
+        # Look for Aparat videos
+        mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group(1), 'Aparat')
+
+        # Look for MPORA videos
+        mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group(1), 'Mpora')
+
+        # Look for embedded Novamov player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Novamov')
+
+        # Look for embedded Facebook player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Facebook')
+
+        # Look for embedded Huffington Post player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'HuffPost')
+
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Look for gorilla-vid style embedding
+            mobj = re.search(r'(?s)(?:jw_plugins|JWPlayerOptions).*?file\s*:\s*["\'](.*?)["\']', webpage)
         if mobj is None:
             # Broaden the search a little bit
             mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
         if mobj is None:
             # Broaden the search a little bit: JWPlayer JS loader
-            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http[^\'"]*)', webpage)
+            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
         if mobj is None:
             # Try to find twitter cards info
             mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
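All of the new embed probes share one regex idiom: capture the opening quote (single or double) in a group and require the same quote to close via the `\1` backreference. A small demonstration against an invented page snippet:

import re

page = ("<iframe src='https://embed.live.huffingtonpost.com/"
        "HPLEmbedPlayer/?segmentId=52dd3e4b02a7602131000677'></iframe>")

# (["\']) grabs whichever quote opens the attribute; \1 demands the same one closes it.
mobj = re.search(
    r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1',
    page)
if mobj is not None:
    print(mobj.group('url'))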
@@ -263,17 +377,21 @@ class GenericIE(InfoExtractor):
             # HTML5 video
             mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
         if mobj is None:
-            raise ExtractorError(u'Unsupported URL: %s' % url)
+            raise ExtractorError('Unsupported URL: %s' % url)
 
         # It's possible that one of the regexes
         # matched, but returned an empty group:
         if mobj.group(1) is None:
-            raise ExtractorError(u'Did not find a valid video URL at %s' % url)
+            raise ExtractorError('Did not find a valid video URL at %s' % url)
 
         video_url = mobj.group(1)
         video_url = compat_urlparse.urljoin(url, video_url)
         video_id = compat_urllib_parse.unquote(os.path.basename(video_url))
 
+        # Sometimes, jwplayer extraction will result in a YouTube URL
+        if YoutubeIE.suitable(video_url):
+            return self.url_result(video_url, 'Youtube')
+
         # here's a fun little line of code for you:
         video_id = os.path.splitext(video_id)[0]
@@ -1,4 +1,5 @@
 # coding: utf-8
+from __future__ import unicode_literals
 
 import datetime
 import re
@@ -10,32 +11,28 @@ from ..utils import (
 
 
 class GooglePlusIE(InfoExtractor):
-    IE_DESC = u'Google Plus'
-    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
-    IE_NAME = u'plus.google'
+    IE_DESC = 'Google Plus'
+    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
+    IE_NAME = 'plus.google'
     _TEST = {
-        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
-        u"file": u"ZButuJc6CtH.flv",
-        u"info_dict": {
-            u"upload_date": u"20120613",
-            u"uploader": u"井上ヨシマサ",
-            u"title": u"嘆きの天使 降臨"
+        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
+        'info_dict': {
+            'id': 'ZButuJc6CtH',
+            'ext': 'flv',
+            'upload_date': '20120613',
+            'uploader': '井上ヨシマサ',
+            'title': '嘆きの天使 降臨',
         }
     }
 
     def _real_extract(self, url):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
 
-        post_url = mobj.group(0)
-        video_id = mobj.group(1)
-
-        video_extension = 'flv'
+        video_id = mobj.group('id')
 
         # Step 1, Retrieve post webpage to extract further information
-        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
+        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
 
         self.report_extraction(video_id)
 
@@ -43,7 +40,7 @@ class GooglePlusIE(InfoExtractor):
         upload_date = self._html_search_regex(
             r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                     ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
+            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
         if upload_date:
             # Convert timestring to a format suitable for filename
             upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
@@ -51,28 +48,27 @@ class GooglePlusIE(InfoExtractor):
 
         # Extract uploader
         uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, u'uploader', fatal=False)
+            webpage, 'uploader', fatal=False)
 
         # Extract title
         # Get the first line for title
         video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
-            webpage, 'title', default=u'NA')
+            webpage, 'title', default='NA')
 
         # Step 2, Simulate clicking the image box to launch video
         DOMAIN = 'https://plus.google.com/'
         video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
-            webpage, u'video page URL')
+            webpage, 'video page URL')
         if not video_page.startswith(DOMAIN):
             video_page = DOMAIN + video_page
 
-        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
+        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
 
-        # Extract video links on video page
-        """Extract video links of all sizes"""
+        # Extract video links all sizes
         pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
         mobj = re.findall(pattern, webpage)
         if len(mobj) == 0:
-            raise ExtractorError(u'Unable to extract video links')
+            raise ExtractorError('Unable to extract video links')
 
         # Sort in resolution
         links = sorted(mobj)
@@ -87,12 +83,11 @@ class GooglePlusIE(InfoExtractor):
         except AttributeError: # Python 3
             video_url = bytes(video_url, 'ascii').decode('unicode-escape')
 
-        return [{
+        return {
             'id': video_id,
             'url': video_url,
             'uploader': uploader,
             'upload_date': upload_date,
             'title': video_title,
-            'ext': video_extension,
-        }]
+            'ext': 'flv',
+        }
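The Python 3 branch at the end decodes the backslash escapes that the scraped URL still carries. A one-line demonstration with a made-up sample value:

# '\u003d' and '\u0026' are literal six-character escapes in the page source;
# 'unicode-escape' turns them back into '=' and '&'.
raw = 'http://redirector.googlevideo.com/videoplayback?id\\u003dabc\\u0026itag\\u003d22'
video_url = bytes(raw, 'ascii').decode('unicode-escape')
print(video_url)
# -> http://redirector.googlevideo.com/videoplayback?id=abc&itag=22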
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import re
 
@@ -8,32 +10,42 @@ from ..utils import (
 
 
 class GoogleSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Google Video search'
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    IE_DESC = 'Google Video search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
+    IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
+        entries = []
        res = {
            '_type': 'playlist',
            'id': query,
-            'entries': []
+            'title': query,
        }
 
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
+        for pagenum in itertools.count():
+            result_url = (
+                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
+                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
+
+            webpage = self._download_webpage(
+                result_url, 'gvsearch:' + query,
+                note='Downloading result page ' + str(pagenum + 1))
 
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
+            for hit_idx, mobj in enumerate(re.finditer(
+                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
+
+                # Skip playlists
+                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
+                    continue
+
+                entries.append({
                     '_type': 'url',
                     'url': mobj.group(1)
-                }
-                res['entries'].append(e)
+                })
 
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
+                res['entries'] = entries[:n]
                 return res
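The rewritten loop stops either once enough entries have accumulated or when the results page no longer shows a "next" link. The same control flow, reduced to a sketch with a stand-in page fetcher:

import itertools

def collect(fetch_page, n):
    # Page through results until n entries are gathered or a page
    # reports no further pages -- the two stop conditions above.
    entries = []
    for pagenum in itertools.count():
        page_entries, has_next = fetch_page(pagenum)
        entries.extend(page_entries)
        if len(entries) >= n or not has_next:
            return entries[:n]

# A stand-in fetcher: three fake pages of ten hits each.
def fake_fetch(pagenum):
    return ['hit-%d-%d' % (pagenum, i) for i in range(10)], pagenum < 2

print(len(collect(fake_fetch, 25)))  # -> 25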
62  youtube_dl/extractor/helsinki.py  Normal file
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class HelsinkiIE(InfoExtractor):
+    IE_DESC = 'helsinki.fi'
+    _VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258',
+        'info_dict': {
+            'id': '20258',
+            'ext': 'mp4',
+            'title': 'Tietotekniikkafoorumi-iltapäivä',
+            'description': 'md5:f5c904224d43c133225130fe156a5ee0',
+        },
+        'params': {
+            'skip_download': True,  # RTMP
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        formats = []
+
+        mobj = re.search(r'file=((\w+):[^&]+)', webpage)
+        if mobj:
+            formats.append({
+                'ext': mobj.group(2),
+                'play_path': mobj.group(1),
+                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
+                'player_url': 'http://video.helsinki.fi/player.swf',
+                'format_note': 'sd',
+                'quality': 0,
+            })
+
+        mobj = re.search(r'hd\.file=((\w+):[^&]+)', webpage)
+        if mobj:
+            formats.append({
+                'ext': mobj.group(2),
+                'play_path': mobj.group(1),
+                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
+                'player_url': 'http://video.helsinki.fi/player.swf',
+                'format_note': 'hd',
+                'quality': 1,
+            })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': self._og_search_title(webpage).replace('Video: ', ''),
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'formats': formats,
+        }
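`_sort_formats` orders the collected formats so the best one comes last. With only the `quality` hint set, a stand-in sort looks like this; it is a simplification of what the real helper weighs (resolution, bitrate, protocol and more):

formats = [
    {'format_note': 'hd', 'quality': 1},
    {'format_note': 'sd', 'quality': 0},
]
# Worst first, best last -- the ordering youtube-dl's downloader expects.
formats.sort(key=lambda f: f.get('quality', -1))
print([f['format_note'] for f in formats])  # -> ['sd', 'hd']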
@@ -1,17 +1,25 @@
+from __future__ import unicode_literals
+
 import re
 import base64
 
 from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+    ExtractorError,
+    HEADRequest,
+)
 
 
 class HotNewHipHopIE(InfoExtractor):
-    _VALID_URL = r'http://www\.hotnewhiphop.com/.*\.(?P<id>.*)\.html'
+    _VALID_URL = r'http://www\.hotnewhiphop\.com/.*\.(?P<id>.*)\.html'
     _TEST = {
-        u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html",
-        u'file': u'1435540.mp3',
-        u'md5': u'2c2cd2f76ef11a9b3b581e8b232f3d96',
-        u'info_dict': {
-            u"title": u'Freddie Gibbs "Lay It Down"'
+        'url': 'http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html',
+        'file': '1435540.mp3',
+        'md5': '2c2cd2f76ef11a9b3b581e8b232f3d96',
+        'info_dict': {
+            'title': 'Freddie Gibbs - Lay It Down'
        }
    }
 
@@ -21,24 +29,41 @@ class HotNewHipHopIE(InfoExtractor):
 
         webpage_src = self._download_webpage(url, video_id)
 
-        video_url_base64 = self._search_regex(r'data-path="(.*?)"',
-            webpage_src, u'video URL', fatal=False)
+        video_url_base64 = self._search_regex(
+            r'data-path="(.*?)"', webpage_src, u'video URL', fatal=False)
 
-        if video_url_base64 == None:
-            video_url = self._search_regex(r'"contentUrl" content="(.*?)"', webpage_src,
-                u'video URL')
+        if video_url_base64 is None:
+            video_url = self._search_regex(
+                r'"contentUrl" content="(.*?)"', webpage_src, u'video URL')
             return self.url_result(video_url, ie='Youtube')
 
-        video_url = base64.b64decode(video_url_base64).decode('utf-8')
+        reqdata = compat_urllib_parse.urlencode([
+            ('mediaType', 's'),
+            ('mediaId', video_id),
+        ])
+        r = compat_urllib_request.Request(
+            'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata)
+        r.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        mkd = self._download_json(
+            r, video_id, note='Requesting media key',
+            errnote='Could not download media key')
+        if 'mediaKey' not in mkd:
+            raise ExtractorError('Did not get a media key')
 
-        video_title = self._html_search_regex(r"<title>(.*)</title>",
-            webpage_src, u'title')
+        redirect_url = base64.b64decode(video_url_base64).decode('utf-8')
+        redirect_req = HEADRequest(redirect_url)
+        req = self._request_webpage(
+            redirect_req, video_id,
+            note='Resolving final URL', errnote='Could not resolve final URL')
+        video_url = req.geturl()
+        if video_url.endswith('.html'):
+            raise ExtractorError('Redirect failed')
 
-        results = [{
+        video_title = self._og_search_title(webpage_src).strip()
+
+        return {
             'id': video_id,
             'url': video_url,
             'title': video_title,
             'thumbnail': self._og_search_thumbnail(webpage_src),
-            'ext' : 'mp3',
-        }]
-        return results
+        }
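The page's `data-path` attribute carries the media URL base64-encoded, so decoding it is a one-liner. A round-trip sketch with an invented URL:

import base64

# Encode a made-up URL the way the page attribute would carry it...
encoded = base64.b64encode(b'http://example.com/audio/1435540.mp3').decode('ascii')
# ...then unwrap it exactly as the extractor does.
print(base64.b64decode(encoded).decode('utf-8'))
# -> http://example.com/audio/1435540.mp3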
@@ -1,17 +1,20 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 
 
 class HowcastIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
     _TEST = {
-        u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
-        u'file': u'390161.mp4',
-        u'md5': u'8b743df908c42f60cf6496586c7f12c3',
-        u'info_dict': {
-            u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.",
-            u"title": u"How to Tie a Square Knot Properly"
+        'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
+        'md5': '8b743df908c42f60cf6496586c7f12c3',
+        'info_dict': {
+            'id': '390161',
+            'ext': 'mp4',
+            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
+            'title': 'How to Tie a Square Knot Properly',
         }
     }
 
@@ -24,22 +27,15 @@ class HowcastIE(InfoExtractor):
         self.report_extraction(video_id)
 
         video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
-            webpage, u'video URL')
+            webpage, 'video URL')
 
-        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
-            webpage, u'title')
-
         video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
 
-        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
-            webpage, u'thumbnail', fatal=False)
-
-        return [{
+        return {
             'id': video_id,
             'url': video_url,
-            'ext': 'mp4',
-            'title': video_title,
+            'title': self._og_search_title(webpage),
             'description': video_description,
-            'thumbnail': thumbnail,
-        }]
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
82  youtube_dl/extractor/huffpost.py  Normal file
@@ -0,0 +1,82 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    unified_strdate,
+)
+
+
+class HuffPostIE(InfoExtractor):
+    IE_DESC = 'Huffington Post'
+    _VALID_URL = r'''(?x)
+        https?://(embed\.)?live\.huffingtonpost\.com/
+        (?:
+            r/segment/[^/]+/|
+            HPLEmbedPlayer/\?segmentId=
+        )
+        (?P<id>[0-9a-f]+)'''
+
+    _TEST = {
+        'url': 'http://live.huffingtonpost.com/r/segment/legalese-it/52dd3e4b02a7602131000677',
+        'file': '52dd3e4b02a7602131000677.mp4',
+        'md5': '55f5e8981c1c80a64706a44b74833de8',
+        'info_dict': {
+            'title': 'Legalese It! with @MikeSacksHP',
+            'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more. ',
+            'duration': 1549,
+            'upload_date': '20140124',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
+        data = self._download_json(api_url, video_id)['data']
+
+        video_title = data['title']
+        duration = parse_duration(data['running_time'])
+        upload_date = unified_strdate(data['schedule']['starts_at'])
+        description = data.get('description')
+
+        thumbnails = []
+        for url in data['images'].values():
+            m = re.match('.*-([0-9]+x[0-9]+)\.', url)
+            if not m:
+                continue
+            thumbnails.append({
+                'url': url,
+                'resolution': m.group(1),
+            })
+
+        formats = [{
+            'format': key,
+            'format_id': key.replace('/', '.'),
+            'ext': 'mp4',
+            'url': url,
+            'vcodec': 'none' if key.startswith('audio/') else None,
+        } for key, url in data['sources']['live'].items()]
+        if data.get('fivemin_id'):
+            fid = data['fivemin_id']
+            fcat = str(int(fid) // 100 + 1)
+            furl = 'http://avideos.5min.com/2/' + fcat[-3:] + '/' + fcat + '/' + fid + '.mp4'
+            formats.append({
+                'format': 'fivemin',
+                'url': furl,
+                'preference': 1,
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video_title,
+            'description': description,
+            'formats': formats,
+            'duration': duration,
+            'upload_date': upload_date,
+            'thumbnails': thumbnails,
+        }
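The 5min fallback URL is derived arithmetically from the numeric id: the category is the id divided by 100 plus one, and its last three digits become an extra path segment. Working it through with a made-up id:

fid = '518013791'
fcat = str(int(fid) // 100 + 1)  # 5180137 + 1 -> '5180138'
furl = 'http://avideos.5min.com/2/' + fcat[-3:] + '/' + fcat + '/' + fid + '.mp4'
print(furl)
# -> http://avideos.5min.com/2/138/5180138/518013791.mp4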
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -9,25 +11,25 @@ from ..utils import (
 
 
 class ImdbIE(InfoExtractor):
-    IE_NAME = u'imdb'
-    IE_DESC = u'Internet Movie Database trailers'
-    _VALID_URL = r'http://www\.imdb\.com/video/imdb/vi(?P<id>\d+)'
+    IE_NAME = 'imdb'
+    IE_DESC = 'Internet Movie Database trailers'
+    _VALID_URL = r'http://(?:www|m)\.imdb\.com/video/imdb/vi(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://www.imdb.com/video/imdb/vi2524815897',
-        u'md5': u'9f34fa777ade3a6e57a054fdbcb3a068',
-        u'info_dict': {
-            u'id': u'2524815897',
-            u'ext': u'mp4',
-            u'title': u'Ice Age: Continental Drift Trailer (No. 2) - IMDb',
-            u'description': u'md5:9061c2219254e5d14e03c25c98e96a81',
+        'url': 'http://www.imdb.com/video/imdb/vi2524815897',
+        'md5': '9f34fa777ade3a6e57a054fdbcb3a068',
+        'info_dict': {
+            'id': '2524815897',
+            'ext': 'mp4',
+            'title': 'Ice Age: Continental Drift Trailer (No. 2) - IMDb',
+            'description': 'md5:9061c2219254e5d14e03c25c98e96a81',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        webpage = self._download_webpage(url,video_id)
+        webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
         descr = get_element_by_attribute('itemprop', 'description', webpage)
         available_formats = re.findall(
             r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
@@ -37,10 +39,10 @@ class ImdbIE(InfoExtractor):
             f_path = f_path.strip()
             format_page = self._download_webpage(
                 compat_urlparse.urljoin(url, f_path),
-                u'Downloading info for %s format' % f_id)
+                'Downloading info for %s format' % f_id)
             json_data = self._search_regex(
                 r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
-                format_page, u'json data', flags=re.DOTALL)
+                format_page, 'json data', flags=re.DOTALL)
             info = json.loads(json_data)
             format_info = info['videoPlayerObject']['video']
             formats.append({
@@ -55,3 +57,23 @@ class ImdbIE(InfoExtractor):
             'description': descr,
             'thumbnail': format_info['slate'],
         }
+
+
+class ImdbListIE(InfoExtractor):
+    IE_NAME = 'imdb:list'
+    IE_DESC = 'Internet Movie Database lists'
+    _VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        list_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, list_id)
+        entries = [
+            self.url_result('http://www.imdb.com' + m, 'Imdb')
+            for m in re.findall(r'href="(/video/imdb/vi[^"]+)"\s+data-type="playlist"', webpage)]
+
+        list_title = self._html_search_regex(
+            r'<h1 class="header">(.*?)</h1>', webpage, 'list title')
+
+        return self.playlist_result(entries, list_id, list_title)
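The new list extractor just harvests relative `/video/imdb/vi...` hrefs and prefixes the site root. A sketch of that step against invented list markup:

import re

webpage = '''
<a href="/video/imdb/vi2524815897" data-type="playlist">Trailer 1</a>
<a href="/video/imdb/vi1234567890" data-type="playlist">Trailer 2</a>
'''
# Only anchors tagged data-type="playlist" are list entries.
entries = ['http://www.imdb.com' + m for m in
           re.findall(r'href="(/video/imdb/vi[^"]+)"\s+data-type="playlist"', webpage)]
print(entries)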
@@ -1,17 +1,20 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 
 
 class InaIE(InfoExtractor):
-    """Information Extractor for Ina.fr"""
-    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I?[A-F0-9]+)/.*'
+    _VALID_URL = r'http://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)'
     _TEST = {
-        u'url': u'www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
-        u'file': u'I12055569.mp4',
-        u'md5': u'a667021bf2b41f8dc6049479d9bb38a3',
-        u'info_dict': {
-            u"title": u"Fran\u00e7ois Hollande \"Je crois que c'est clair\""
+        'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
+        'md5': 'a667021bf2b41f8dc6049479d9bb38a3',
+        'info_dict': {
+            'id': 'I12055569',
+            'ext': 'mp4',
+            'title': 'François Hollande "Je crois que c\'est clair"',
        }
    }
 
@@ -20,20 +23,14 @@ class InaIE(InfoExtractor):
 
         video_id = mobj.group('id')
         mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
-        video_extension = 'mp4'
-        webpage = self._download_webpage(mrss_url, video_id)
+        info_doc = self._download_xml(mrss_url, video_id)
 
         self.report_extraction(video_id)
 
-        video_url = self._html_search_regex(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)',
-            webpage, u'video URL')
+        video_url = info_doc.find('.//{http://search.yahoo.com/mrss/}player').attrib['url']
 
-        video_title = self._search_regex(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>',
-            webpage, u'title')
-
-        return [{
+        return {
             'id': video_id,
             'url': video_url,
-            'ext': video_extension,
-            'title': video_title,
-        }]
+            'title': info_doc.find('.//title').text,
+        }
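The switch from regex scraping to `_download_xml` means the MRSS notice is now queried with namespaced lookups. A standalone sketch with a trimmed, made-up notice, using stdlib ElementTree:

import xml.etree.ElementTree as ET

mrss = '''<rss xmlns:media="http://search.yahoo.com/mrss/">
  <channel><item>
    <title>Sample title</title>
    <media:player url="http://mp4.ina.fr/sample/I12055569.mp4"/>
  </item></channel>
</rss>'''

doc = ET.fromstring(mrss)
# Namespaced tags use the {uri}tag form, exactly as in the new extractor code.
print(doc.find('.//{http://search.yahoo.com/mrss/}player').attrib['url'])
print(doc.find('.//title').text)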
@@ -1,62 +1,55 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
import base64
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
|
|
||||||
ExtractorError,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class InfoQIE(InfoExtractor):
|
class InfoQIE(InfoExtractor):
|
||||||
_VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
|
_VALID_URL = r'https?://(?:www\.)?infoq\.com/[^/]+/(?P<id>[^/]+)$'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
u"name": u"InfoQ",
|
"name": "InfoQ",
|
||||||
u"url": u"http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
|
"url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
|
||||||
u"file": u"12-jan-pythonthings.mp4",
|
"file": "12-jan-pythonthings.mp4",
|
||||||
u"info_dict": {
|
"info_dict": {
|
||||||
u"description": u"Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
|
"description": "Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
|
||||||
u"title": u"A Few of My Favorite [Python] Things"
|
"title": "A Few of My Favorite [Python] Things",
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"skip_download": True,
|
||||||
},
|
},
|
||||||
u"params": {
|
|
||||||
u"skip_download": True
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id=url)
|
webpage = self._download_webpage(url, video_id)
|
||||||
self.report_extraction(url)
|
|
||||||
|
|
||||||
# Extract video URL
|
# Extract video URL
|
||||||
mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
|
encoded_id = self._search_regex(r"jsclassref ?= ?'([^']*)'", webpage, 'encoded id')
|
||||||
if mobj is None:
|
real_id = compat_urllib_parse.unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
|
||||||
raise ExtractorError(u'Unable to extract video url')
|
|
||||||
real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
|
|
||||||
video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
|
video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
|
||||||
|
|
||||||
# Extract title
|
# Extract title
|
||||||
video_title = self._search_regex(r'contentTitle = "(.*?)";',
|
video_title = self._search_regex(r'contentTitle = "(.*?)";',
|
||||||
webpage, u'title')
|
webpage, 'title')
|
||||||
|
|
||||||
# Extract description
|
# Extract description
|
||||||
video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
|
video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
|
||||||
webpage, u'description', fatal=False)
|
webpage, 'description', fatal=False)
|
||||||
|
|
||||||
video_filename = video_url.split('/')[-1]
|
video_filename = video_url.split('/')[-1]
|
||||||
video_id, extension = video_filename.split('.')
|
video_id, extension = video_filename.split('.')
|
||||||
|
|
||||||
info = {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'uploader': None,
|
|
||||||
'upload_date': None,
|
|
||||||
'title': video_title,
|
'title': video_title,
|
||||||
'ext': extension, # Extension is always(?) mp4, but seems to be flv
|
'ext': extension, # Extension is always(?) mp4, but seems to be flv
|
||||||
'thumbnail': None,
|
|
||||||
'description': video_description,
|
'description': video_description,
|
||||||
}
|
}
|
||||||
|
|
||||||
return [info]
|
|
||||||
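For readers skimming the InfoQ change: the page's jsclassref attribute holds a URL-quoted RTMP play path wrapped in base64, and the rewrite above just swaps the manual re.search/ExtractorError dance for _search_regex. A standalone sketch of the decoding step, using the Python 3 stdlib in place of compat_urllib_parse and a made-up encoded value for illustration:

import base64
from urllib.parse import unquote

def decode_infoq_path(encoded_id):
    # Reverse the two layers: base64 first, then URL percent-encoding.
    return unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))

# Hypothetical encoded value, built here only so the sketch runs.
example = base64.b64encode(b'presentations%2F12-jan-pythonthings.mp4').decode('ascii')
print('rtmpe://video.infoq.com/cfx/st/' + decode_infoq_path(example))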
youtube_dl/extractor/instagram.py
@@ -1,35 +1,39 @@
+from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor


 class InstagramIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?instagram\.com/p/(.*?)/'
+    _VALID_URL = r'http://instagram\.com/p/(?P<id>.*?)/'
     _TEST = {
-        u'url': u'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
-        u'file': u'aye83DjauH.mp4',
-        u'md5': u'0d2da106a9d2631273e192b372806516',
-        u'info_dict': {
-            u"uploader_id": u"naomipq",
-            u"title": u"Video by naomipq",
-            u'description': u'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
+        'url': 'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
+        'md5': '0d2da106a9d2631273e192b372806516',
+        'info_dict': {
+            'id': 'aye83DjauH',
+            'ext': 'mp4',
+            'uploader_id': 'naomipq',
+            'title': 'Video by naomipq',
+            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
         }
     }

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
-            webpage, u'uploader id', fatal=False)
-        desc = self._search_regex(r'"caption":"(.*?)"', webpage, u'description',
+            webpage, 'uploader id', fatal=False)
+        desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
             fatal=False)

-        return [{
+        return {
             'id': video_id,
             'url': self._og_search_video_url(webpage, secure=False),
             'ext': 'mp4',
-            'title': u'Video by %s' % uploader_id,
+            'title': 'Video by %s' % uploader_id,
             'thumbnail': self._og_search_thumbnail(webpage),
             'uploader_id': uploader_id,
             'description': desc,
-        }]
+        }
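The Instagram rewrite follows the same modernization pattern seen throughout this compare: a named group in _VALID_URL, u'' literals dropped in favor of unicode_literals, and a bare info dict instead of a one-element list. A quick standalone check of the new named-group pattern:

import re

# The new pattern from the diff above; the lazy (?P<id>.*?) group stops
# at the first slash after /p/.
VALID_URL = r'http://instagram\.com/p/(?P<id>.*?)/'

m = re.match(VALID_URL, 'http://instagram.com/p/aye83DjauH/?foo=bar#abc')
print(m.group('id'))  # aye83DjauH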
youtube_dl/extractor/internetvideoarchive.py
@@ -5,7 +5,6 @@ from ..utils import (
     compat_urlparse,
     compat_urllib_parse,
     xpath_with_ns,
-    determine_ext,
 )


@@ -63,13 +62,17 @@ class InternetVideoArchiveIE(InfoExtractor):
         for content in item.findall(_bp('media:group/media:content')):
             attr = content.attrib
             f_url = attr['url']
+            width = int(attr['width'])
+            bitrate = int(attr['bitrate'])
+            format_id = '%d-%dk' % (width, bitrate)
             formats.append({
+                'format_id': format_id,
                 'url': f_url,
-                'ext': determine_ext(f_url),
-                'width': int(attr['width']),
-                'bitrate': int(attr['bitrate']),
+                'width': width,
+                'tbr': bitrate,
             })
-        formats = sorted(formats, key=lambda f: f['bitrate'])
+        self._sort_formats(formats)

         return {
             'id': video_id,
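The point of this change is to stop hand-sorting by a nonstandard 'bitrate' key and instead give _sort_formats the standard 'tbr' field (total bitrate, KBit/s) plus a readable format_id. A toy sketch of the resulting ordering; sample values are invented, and _sort_formats itself weighs more keys, but with only tbr differing it reduces to this:

# Sample format dicts shaped like the ones built in the loop above.
formats = [
    {'format_id': '640-1200k', 'url': 'http://example.invalid/hi.mp4', 'width': 640, 'tbr': 1200},
    {'format_id': '320-400k', 'url': 'http://example.invalid/lo.mp4', 'width': 320, 'tbr': 400},
]
# youtube-dl convention: worst format first, best last.
formats.sort(key=lambda f: f['tbr'])
print([f['format_id'] for f in formats])  # ['320-400k', '640-1200k']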
youtube_dl/extractor/iprima.py (new file, 85 lines)
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+from random import random
+from math import floor
+
+from .common import InfoExtractor
+from ..utils import compat_urllib_request
+
+
+class IPrimaIE(InfoExtractor):
+    _VALID_URL = r'https?://play\.iprima\.cz/(?P<videogroup>.+)/(?P<videoid>.+)'
+
+    _TESTS = [{
+        'url': 'http://play.iprima.cz/particka/particka-92',
+        'info_dict': {
+            'id': '39152',
+            'ext': 'flv',
+            'title': 'Partička (92)',
+            'description': 'md5:3740fda51464da35a2d4d0670b8e4fd6',
+            'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
+        },
+        'params': {
+            'skip_download': True,
+        },
+    },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+
+        webpage = self._download_webpage(url, video_id)
+
+        player_url = 'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' % (
+            floor(random()*1073741824),
+            floor(random()*1073741824))
+
+        req = compat_urllib_request.Request(player_url)
+        req.add_header('Referer', url)
+        playerpage = self._download_webpage(req, video_id)
+
+        base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])
+
+        zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
+
+        if zoneGEO != '0':
+            base_url = base_url.replace('token', 'token_'+zoneGEO)
+
+        formats = []
+        for format_id in ['lq', 'hq', 'hd']:
+            filename = self._html_search_regex(r'"%s_id":(.+?),' % format_id, webpage, 'filename')
+
+            if filename == 'null':
+                continue
+
+            real_id = self._search_regex(r'Prima-[0-9]{10}-([0-9]+)_', filename, 'real video id')
+
+            if format_id == 'lq':
+                quality = 0
+            elif format_id == 'hq':
+                quality = 1
+            elif format_id == 'hd':
+                quality = 2
+                filename = 'hq/'+filename
+
+            formats.append({
+                'format_id': format_id,
+                'url': base_url,
+                'quality': quality,
+                'play_path': 'mp4:'+filename.replace('"', '')[:-4],
+                'rtmp_live': True,
+                'ext': 'flv',
+            })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': real_id,
+            'title': self._og_search_title(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+        }
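A note on the player_url construction above: the two floor(random()*1073741824) values are random integers below 2**30 used purely as cache-busters in the query string. A standalone sketch of just that step:

from math import floor
from random import random

# Two random ints below 2**30 (1073741824) make the embed-script URL
# unique per request, defeating any intermediate cache.
player_url = 'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' % (
    floor(random() * 1073741824),
    floor(random() * 1073741824))
print(player_url)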
youtube_dl/extractor/ivi.py (new file, 160 lines)
@@ -0,0 +1,160 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+    ExtractorError,
+)
+
+
+class IviIE(InfoExtractor):
+    IE_DESC = 'ivi.ru'
+    IE_NAME = 'ivi'
+    _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'
+
+    _TESTS = [
+        # Single movie
+        {
+            'url': 'http://www.ivi.ru/watch/53141',
+            'md5': '6ff5be2254e796ed346251d117196cf4',
+            'info_dict': {
+                'id': '53141',
+                'ext': 'mp4',
+                'title': 'Иван Васильевич меняет профессию',
+                'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
+                'duration': 5498,
+                'thumbnail': 'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
+            },
+            'skip': 'Only works from Russia',
+        },
+        # Serial's serie
+        {
+            'url': 'http://www.ivi.ru/watch/dezhurnyi_angel/74791',
+            'md5': '3e6cc9a848c1d2ebcc6476444967baa9',
+            'info_dict': {
+                'id': '74791',
+                'ext': 'mp4',
+                'title': 'Дежурный ангел - 1 серия',
+                'duration': 2490,
+                'thumbnail': 'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
+            },
+            'skip': 'Only works from Russia',
+        }
+    ]
+
+    # Sorted by quality
+    _known_formats = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
+
+    # Sorted by size
+    _known_thumbnails = ['Thumb-120x90', 'Thumb-160', 'Thumb-640x480']
+
+    def _extract_description(self, html):
+        m = re.search(r'<meta name="description" content="(?P<description>[^"]+)"/>', html)
+        return m.group('description') if m is not None else None
+
+    def _extract_comment_count(self, html):
+        m = re.search('(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>', html)
+        return int(m.group('commentcount')) if m is not None else 0
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+
+        api_url = 'http://api.digitalaccess.ru/api/json/'
+
+        data = {'method': 'da.content.get',
+                'params': [video_id, {'site': 's183',
+                                      'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
+                                      'contentid': video_id
+                                      }
+                           ]
+                }
+
+        request = compat_urllib_request.Request(api_url, json.dumps(data))
+
+        video_json_page = self._download_webpage(request, video_id, 'Downloading video JSON')
+        video_json = json.loads(video_json_page)
+
+        if 'error' in video_json:
+            error = video_json['error']
+            if error['origin'] == 'NoRedisValidData':
+                raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+            raise ExtractorError('Unable to download video %s: %s' % (video_id, error['message']), expected=True)
+
+        result = video_json['result']
+
+        formats = [{
+            'url': x['url'],
+            'format_id': x['content_format'],
+            'preference': self._known_formats.index(x['content_format']),
+        } for x in result['files'] if x['content_format'] in self._known_formats]
+
+        self._sort_formats(formats)
+
+        if not formats:
+            raise ExtractorError('No media links available for %s' % video_id)
+
+        duration = result['duration']
+        compilation = result['compilation']
+        title = result['title']
+
+        title = '%s - %s' % (compilation, title) if compilation is not None else title
+
+        previews = result['preview']
+        previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
+        thumbnail = previews[-1]['url'] if len(previews) > 0 else None
+
+        video_page = self._download_webpage(url, video_id, 'Downloading video page')
+        description = self._extract_description(video_page)
+        comment_count = self._extract_comment_count(video_page)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'description': description,
+            'duration': duration,
+            'comment_count': comment_count,
+            'formats': formats,
+        }
+
+
+class IviCompilationIE(InfoExtractor):
+    IE_DESC = 'ivi.ru compilations'
+    IE_NAME = 'ivi:compilation'
+    _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
+
+    def _extract_entries(self, html, compilation_id):
+        return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
+                for serie in re.findall(r'<strong><a href="/watch/%s/(\d+)">(?:[^<]+)</a></strong>' % compilation_id, html)]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        compilation_id = mobj.group('compilationid')
+        season_id = mobj.group('seasonid')
+
+        if season_id is not None:  # Season link
+            season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
+            playlist_id = '%s/season%s' % (compilation_id, season_id)
+            playlist_title = self._html_search_meta('title', season_page, 'title')
+            entries = self._extract_entries(season_page, compilation_id)
+        else:  # Compilation link
+            compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
+            playlist_id = compilation_id
+            playlist_title = self._html_search_meta('title', compilation_page, 'title')
+            seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
+            if len(seasons) == 0:  # No seasons in this compilation
+                entries = self._extract_entries(compilation_page, compilation_id)
+            else:
+                entries = []
+                for season_id in seasons:
+                    season_page = self._download_webpage(
+                        'http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
+                        compilation_id, 'Downloading season %s web page' % season_id)
+                    entries.extend(self._extract_entries(season_page, compilation_id))
+
+        return self.playlist_result(entries, playlist_id, playlist_title)
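The interesting bit in IviIE is how it ranks formats: the position of content_format in the quality-ordered _known_formats list becomes the format's 'preference', and unknown formats are dropped entirely. A standalone sketch with invented file entries:

known_formats = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
files = [{'content_format': 'MP4-SHQ', 'url': 'u1'},
         {'content_format': 'FLV-lo', 'url': 'u2'},
         {'content_format': 'unknown', 'url': 'u3'}]  # filtered out below

formats = [{'url': x['url'],
            'format_id': x['content_format'],
            'preference': known_formats.index(x['content_format'])}
           for x in files if x['content_format'] in known_formats]
formats.sort(key=lambda f: f['preference'])  # worst first, best last
print([f['format_id'] for f in formats])  # ['FLV-lo', 'MP4-SHQ']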
youtube_dl/extractor/jadorecettepub.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from .youtube import YoutubeIE
+
+
+class JadoreCettePubIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?jadorecettepub\.com/[0-9]{4}/[0-9]{2}/(?P<id>.*?)\.html'
+
+    _TEST = {
+        'url': 'http://www.jadorecettepub.com/2010/12/star-wars-massacre-par-les-japonais.html',
+        'md5': '401286a06067c70b44076044b66515de',
+        'info_dict': {
+            'id': 'jLMja3tr7a4',
+            'ext': 'mp4',
+            'title': 'La pire utilisation de Star Wars',
+            'description': "Jadorecettepub.com vous a gratifié de plusieurs pubs géniales utilisant Star Wars et Dark Vador plus particulièrement... Mais l'heure est venue de vous proposer une version totalement massacrée, venue du Japon. Quand les Japonais détruisent l'image de Star Wars pour vendre du thon en boite, ça promet...",
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._html_search_regex(
+            r'<span style="font-size: x-large;"><b>(.*?)</b></span>',
+            webpage, 'title')
+        description = self._html_search_regex(
+            r'(?s)<div id="fb-root">(.*?)<script>', webpage, 'description',
+            fatal=False)
+        real_url = self._search_regex(
+            r'\[/postlink\](.*)endofvid', webpage, 'video URL')
+        video_id = YoutubeIE.extract_id(real_url)
+
+        return {
+            '_type': 'url_transparent',
+            'url': real_url,
+            'id': video_id,
+            'title': title,
+            'description': description,
+        }
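The 'url_transparent' result type used above hands the actual extraction off to another extractor (YouTube here, via YoutubeIE.extract_id) while keeping the metadata scraped from the blog post. A toy illustration of the merge semantics; this is a simplified stand-in written for this note, not youtube-dl's actual resolution code:

# Toy merge (assumption: simplified model of url_transparent handling):
# non-empty fields from the outer result override what the delegated
# extractor would return.
def merge_url_transparent(outer, delegated):
    merged = dict(delegated)
    merged.update({k: v for k, v in outer.items()
                   if k not in ('_type', 'url') and v is not None})
    return merged

outer = {'_type': 'url_transparent', 'url': 'http://youtu.be/jLMja3tr7a4',
         'id': 'jLMja3tr7a4', 'title': 'La pire utilisation de Star Wars'}
delegated = {'id': 'jLMja3tr7a4', 'title': 'raw YouTube title', 'ext': 'mp4'}
print(merge_url_transparent(outer, delegated)['title'])  # outer title wins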
youtube_dl/extractor/jeuxvideo.py
@@ -1,5 +1,7 @@
 # coding: utf-8
+
+from __future__ import unicode_literals
+
 import json
 import re

@@ -10,12 +12,13 @@ class JeuxVideoIE(InfoExtractor):
     _VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm'

     _TEST = {
-        u'url': u'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
-        u'file': u'5182.mp4',
-        u'md5': u'046e491afb32a8aaac1f44dd4ddd54ee',
-        u'info_dict': {
-            u'title': u'GC 2013 : Tearaway nous présente ses papiers d\'identité',
-            u'description': u'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
+        'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
+        'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
+        'info_dict': {
+            'id': '5182',
+            'ext': 'mp4',
+            'title': 'GC 2013 : Tearaway nous présente ses papiers d\'identité',
+            'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
         },
     }

@@ -25,14 +28,14 @@ class JeuxVideoIE(InfoExtractor):
         webpage = self._download_webpage(url, title)
         xml_link = self._html_search_regex(
             r'<param name="flashvars" value="config=(.*?)" />',
-            webpage, u'config URL')
+            webpage, 'config URL')

         video_id = self._search_regex(
             r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
-            xml_link, u'video ID')
+            xml_link, 'video ID')

         config = self._download_xml(
-            xml_link, title, u'Downloading XML config')
+            xml_link, title, 'Downloading XML config')
         info_json = config.find('format.json').text
         info = json.loads(info_json)['versions'][0]
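JeuxVideoIE parses two nested layers: an XML player config whose format.json element contains a JSON document with a 'versions' list. A self-contained sketch of that double parse; the XML sample is invented here, shaped like what the extractor expects:

import json
import xml.etree.ElementTree as ET

# Invented miniature config mirroring the structure the extractor reads:
# a <format.json> child whose text node holds JSON.
config = ET.fromstring(
    '<root><format.json>'
    '{"versions": [{"title": "sample", "file": "5182.mp4"}]}'
    '</format.json></root>')

info = json.loads(config.find('format.json').text)['versions'][0]
print(info['file'])  # 5182.mp4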
youtube_dl/extractor/jpopsukitv.py (new file, 73 lines)
@@ -0,0 +1,73 @@
+# coding=utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    unified_strdate,
+)
+
+
+class JpopsukiIE(InfoExtractor):
+    IE_NAME = 'jpopsuki.tv'
+    _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
+
+    _TEST = {
+        'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
+        'md5': '88018c0c1a9b1387940e90ec9e7e198e',
+        'file': '00be659d23b0b40508169cdee4545771.mp4',
+        'info_dict': {
+            'id': '00be659d23b0b40508169cdee4545771',
+            'title': 'ayumi hamasaki - evolution',
+            'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
+            'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
+            'uploader': 'plama_chan',
+            'uploader_id': '404',
+            'upload_date': '20121101'
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = 'http://www.jpopsuki.tv' + self._html_search_regex(
+            r'<source src="(.*?)" type', webpage, 'video url')
+
+        video_title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+        uploader = self._html_search_regex(
+            r'<li>from: <a href="/user/view/user/(.*?)/uid/',
+            webpage, 'video uploader', fatal=False)
+        uploader_id = self._html_search_regex(
+            r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
+            webpage, 'video uploader_id', fatal=False)
+        upload_date = self._html_search_regex(
+            r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
+            fatal=False)
+        if upload_date is not None:
+            upload_date = unified_strdate(upload_date)
+        view_count_str = self._html_search_regex(
+            r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
+            fatal=False)
+        comment_count_str = self._html_search_regex(
+            r'<h2>([0-9]+?) comments</h2>', webpage, 'video comment_count',
+            fatal=False)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': video_title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'upload_date': upload_date,
+            'view_count': int_or_none(view_count_str),
+            'comment_count': int_or_none(comment_count_str),
+        }
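The int_or_none and unified_strdate helpers used above make the extractor tolerant of fields that failed to scrape (fatal=False regexes return None). Simplified stand-ins showing the behavior relied on here; the real youtube_dl.utils versions handle many more formats and edge cases:

from datetime import datetime

def int_or_none(v):
    # pass None through; convert anything else to int
    return int(v) if v is not None else None

def unified_strdate(date_str):
    # normalize a scraped date to YYYYMMDD; only a few formats here
    for fmt in ('%Y-%m-%d', '%d.%m.%Y', '%Y.%m.%d'):
        try:
            return datetime.strptime(date_str, fmt).strftime('%Y%m%d')
        except ValueError:
            pass
    return None

print(int_or_none(None), int_or_none('404'))  # None 404
print(unified_strdate('2012-11-01'))          # 20121101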
youtube_dl/extractor/kankan.py
@@ -1,21 +1,24 @@
+from __future__ import unicode_literals
+
 import re
 import hashlib

 from .common import InfoExtractor
-from ..utils import determine_ext

 _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()


 class KankanIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'

     _TEST = {
-        u'url': u'http://yinyue.kankan.com/vod/48/48863.shtml',
-        u'file': u'48863.flv',
-        u'md5': u'29aca1e47ae68fc28804aca89f29507e',
-        u'info_dict': {
-            u'title': u'Ready To Go',
+        'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
+        'file': '48863.flv',
+        'md5': '29aca1e47ae68fc28804aca89f29507e',
+        'info_dict': {
+            'title': 'Ready To Go',
         },
+        'skip': 'Only available from China',
     }

     def _real_extract(self, url):
@@ -23,22 +26,23 @@ class KankanIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)

-        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, u'video title')
+        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
         surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
         gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
         gcid = gcids[-1]

-        video_info_page = self._download_webpage('http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid,
-                                                 video_id, u'Downloading video url info')
-        ip = self._search_regex(r'ip:"(.+?)"', video_info_page, u'video url ip')
-        path = self._search_regex(r'path:"(.+?)"', video_info_page, u'video url path')
-        param1 = self._search_regex(r'param1:(\d+)', video_info_page, u'param1')
-        param2 = self._search_regex(r'param2:(\d+)', video_info_page, u'param2')
+        info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
+        video_info_page = self._download_webpage(
+            info_url, video_id, 'Downloading video url info')
+        ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip')
+        path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path')
+        param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1')
+        param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2')
         key = _md5('xl_mp43651' + param1 + param2)
         video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)

-        return {'id': video_id,
+        return {
+            'id': video_id,
             'title': title,
             'url': video_url,
-            'ext': determine_ext(video_url),
         }
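The Kankan CDN URL is signed client-side: the key is the MD5 hex digest of the fixed salt 'xl_mp43651' concatenated with the two scraped params, exactly as the _md5 lambda above computes it. A standalone run with hypothetical param values:

import hashlib

_md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()

# param1/param2 would be scraped from the getCdnresource_flv page;
# these values are invented for illustration.
param1, param2 = '123', '456'
key = _md5('xl_mp43651' + param1 + param2)
print('http://%s%s?key=%s&key1=%s' % ('1.2.3.4', '/vod/48863.flv', key, param2))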
youtube_dl/extractor/keek.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor
@@ -5,36 +7,34 @@ from .common import InfoExtractor


 class KeekIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
-    IE_NAME = u'keek'
+    IE_NAME = 'keek'
     _TEST = {
-        u'url': u'https://www.keek.com/ytdl/keeks/NODfbab',
-        u'file': u'NODfbab.mp4',
-        u'md5': u'9b0636f8c0f7614afa4ea5e4c6e57e83',
-        u'info_dict': {
-            u"uploader": u"ytdl",
-            u"title": u"test chars: \"'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de ."
-        }
+        'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
+        'file': 'NODfbab.mp4',
+        'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83',
+        'info_dict': {
+            'uploader': 'ytdl',
+            'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
+        },
    }

     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
         video_id = m.group('videoID')

-        video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
-        thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
+        video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
+        thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
         webpage = self._download_webpage(url, video_id)

-        video_title = self._og_search_title(webpage)
-
-        uploader = self._html_search_regex(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
-            webpage, u'uploader', fatal=False)
+        uploader = self._html_search_regex(
+            r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
+            webpage, 'uploader', fatal=False)

-        info = {
+        return {
             'id': video_id,
             'url': video_url,
             'ext': 'mp4',
-            'title': video_title,
+            'title': self._og_search_title(webpage),
             'thumbnail': thumbnail,
             'uploader': uploader
         }
-        return [info]
youtube_dl/extractor/khanacademy.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate,
+)
+
+
+class KhanAcademyIE(InfoExtractor):
+    _VALID_URL = r'^https?://(?:www\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
+    IE_NAME = 'KhanAcademy'
+
+    _TEST = {
+        'url': 'http://www.khanacademy.org/video/one-time-pad',
+        'file': 'one-time-pad.mp4',
+        'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
+        'info_dict': {
+            'title': 'The one-time pad',
+            'description': 'The perfect cipher',
+            'duration': 176,
+            'uploader': 'Brit Cruise',
+            'upload_date': '20120411',
+        }
+    }
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        if m.group('key') == 'video':
+            data = self._download_json(
+                'http://api.khanacademy.org/api/v1/videos/' + video_id,
+                video_id, 'Downloading video info')
+
+            upload_date = unified_strdate(data['date_added'])
+            uploader = ', '.join(data['author_names'])
+            return {
+                '_type': 'url_transparent',
+                'url': data['url'],
+                'id': video_id,
+                'title': data['title'],
+                'thumbnail': data['image_url'],
+                'duration': data['duration'],
+                'description': data['description'],
+                'uploader': uploader,
+                'upload_date': upload_date,
+            }
+        else:
+            # topic
+            data = self._download_json(
+                'http://api.khanacademy.org/api/v1/topic/' + video_id,
+                video_id, 'Downloading topic info')
+
+            entries = [
+                {
+                    '_type': 'url',
+                    'url': c['url'],
+                    'id': c['id'],
+                    'title': c['title'],
+                }
+                for c in data['children'] if c['kind'] in ('Video', 'Topic')]
+
+            return {
+                '_type': 'playlist',
+                'id': video_id,
+                'title': data['title'],
+                'description': data['description'],
+                'entries': entries,
+            }
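KhanAcademyIE routes on the first path segment: key == 'video' selects the single-video branch, anything else is treated as a topic playlist. A quick standalone check of the _VALID_URL pattern; the second sample URL is invented here to exercise the optional middle segments:

import re

VALID_URL = r'^https?://(?:www\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'

for url in ('http://www.khanacademy.org/video/one-time-pad',
            'http://www.khanacademy.org/math/algebra/introduction-to-algebra'):
    m = re.match(VALID_URL, url)
    print(m.group('key'), '->', m.group('id'))
# video -> one-time-pad
# math -> introduction-to-algebra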
Some files were not shown because too many files have changed in this diff.