Compare commits: 2014.07.11...2015.04.03 (2,924 commits)
[Commit table omitted: the compare view listed all 2,924 commits in this range, but only the SHA1 column survived extraction; the Author, Date, and message cells were empty.]
.gitignore (vendored, 5 changes)

@@ -11,6 +11,7 @@ MANIFEST
 README.txt
 youtube-dl.1
 youtube-dl.bash-completion
+youtube-dl.fish
 youtube-dl
 youtube-dl.exe
 youtube-dl.tar.gz
@@ -26,5 +27,9 @@ updates_key.pem
 *.m4a
 *.m4v
 *.part
+*.swp
 test/testdata
 .tox
+youtube-dl.zsh
+.idea
+.idea/*
.travis.yml

@@ -2,14 +2,17 @@ language: python
 python:
   - "2.6"
   - "2.7"
+  - "3.2"
   - "3.3"
   - "3.4"
+before_install:
+  - sudo apt-get update -qq
+  - sudo apt-get install -yqq rtmpdump
 script: nosetests test --verbose
 notifications:
   email:
     - filippo.valsorda@gmail.com
     - phihag@phihag.de
-    - jaime.marquinez.ferrandiz+travis@gmail.com
     - yasoob.khld@gmail.com
 #  irc:
 #    channels:
AUTHORS (new file, 122 lines)

@@ -0,0 +1,122 @@
Ricardo Garcia Gonzalez
Danny Colligan
Benjamin Johnson
Vasyl' Vavrychuk
Witold Baryluk
Paweł Paprota
Gergely Imreh
Rogério Brito
Philipp Hagemeister
Sören Schulze
Kevin Ngo
Ori Avtalion
shizeeg
Filippo Valsorda
Christian Albrecht
Dave Vasilevsky
Jaime Marquínez Ferrándiz
Jeff Crouse
Osama Khalid
Michael Walter
M. Yasoob Ullah Khalid
Julien Fraichard
Johny Mo Swag
Axel Noack
Albert Kim
Pierre Rudloff
Huarong Huo
Ismael Mejía
Steffan 'Ruirize' James
Andras Elso
Jelle van der Waa
Marcin Cieślak
Anton Larionov
Takuya Tsuchida
Sergey M.
Michael Orlitzky
Chris Gahan
Saimadhav Heblikar
Mike Col
Oleg Prutz
pulpe
Andreas Schmitz
Michael Kaiser
Niklas Laxström
David Triendl
Anthony Weems
David Wagner
Juan C. Olivares
Mattias Harrysson
phaer
Sainyam Kapoor
Nicolas Évrard
Jason Normore
Hoje Lee
Adam Thalhammer
Georg Jähnig
Ralf Haring
Koki Takahashi
Ariset Llerena
Adam Malcontenti-Wilson
Tobias Bell
Naglis Jonaitis
Charles Chen
Hassaan Ali
Dobrosław Żybort
David Fabijan
Sebastian Haas
Alexander Kirk
Erik Johnson
Keith Beckman
Ole Ernst
Aaron McDaniel (mcd1992)
Magnus Kolstad
Hari Padmanaban
Carlos Ramos
5moufl
lenaten
Dennis Scheiba
Damon Timm
winwon
Xavier Beynon
Gabriel Schubiner
xantares
Jan Matějka
Mauroy Sébastien
William Sewell
Dao Hoang Son
Oskar Jauch
Matthew Rayfield
t0mm0
Tithen-Firion
Zack Fernandes
cryptonaut
Adrian Kretz
Mathias Rav
Petr Kutalek
Will Glynn
Max Reimann
Cédric Luthi
Thijs Vermeir
Joel Leclerc
Christopher Krooss
Ondřej Caletka
Dinesh S
Johan K. Jensen
Yen Chi Hsuan
Enam Mijbah Noor
David Luhmer
Shaya Goldberg
Paul Hartmann
Frans de Jonge
Robin de Rooij
Ryan Schmidt
Leslie P. Polzer
Duncan Keall
Alexander Mamay
Devin J. Pohly
Eduardo Ferro Aldama
Jeff Buchbinder
Amish Bhadeshia
Joram Schrijver
Will W.
CONTRIBUTING.md (new file, 140 lines)

@@ -0,0 +1,140 @@
**Please include the full output of youtube-dl when run with `-v`**.

The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.

Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):

### Is the description of the issue itself sufficient?

We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.

So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious

- What the problem is
- How it could be fixed
- What your proposed solution would look like

If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.

For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.

If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).

**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.

### Are you using the latest version?

Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive concern issues that have already been fixed, filed by people running outdated versions. This goes for feature requests as well.

### Is the issue already documented?

Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.

### Why are existing options not enough?

Before requesting a new feature, please have a quick peek at [the list of supported options](https://github.com/rg3/youtube-dl/blob/master/README.md#synopsis). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.

### Is there enough context in your bug report?

People want to solve problems, and often think they do us a favor by breaking down their larger problems (e.g. wanting to skip already downloaded files) into a specific request (e.g. requesting us to look whether the file exists before downloading the info page). However, what often happens is that they break down the problem into two steps: one simple, and one impossible (or extremely complicated).

We are then presented with a very complicated request when the original problem could be solved far easier, e.g. by recording the downloaded video IDs in a separate file. To avoid this, you must include the greater context where it is non-obvious. In particular, every feature request that does not consist of adding support for a new site should contain a use case scenario that explains in what situation the missing feature would be useful.

### Does the issue involve one problem, and one problem only?

Some of our users seem to think there is a limit of issues they can or should open. There is no limit of issues they can or should open. While it may seem appealing to be able to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.

In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, Whitehouse podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of youtube-dl that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.

### Is anyone going to need the feature?

Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.

### Is your question about youtube-dl?

It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
# DEVELOPER INSTRUCTIONS

Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.

To run youtube-dl as a developer, you don't need to build anything either. Simply execute

    python -m youtube_dl

To run the tests, simply invoke your favorite test runner, or execute a test file directly; any of the following work:

    python -m unittest discover
    python test/test_download.py
    nosetests

If you want to create a build of youtube-dl yourself, you'll need

* python
* make
* pandoc
* zip
* nosetests
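With those tools installed, a build boils down to a plain `make` invocation. A minimal sketch, assuming you are in the root of the checkout (the `youtube-dl` target is defined in the Makefile shown further down this page):

```bash
# Build the self-contained youtube-dl executable from the youtube_dl/ sources
make youtube-dl

# Sanity-check the freshly built binary
./youtube-dl --version
```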
### Adding support for a new site

If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):

1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:

    ```python
    # coding: utf-8
    from __future__ import unicode_literals

    from .common import InfoExtractor


    class YourExtractorIE(InfoExtractor):
        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
        _TEST = {
            'url': 'http://yourextractor.com/watch/42',
            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
            'info_dict': {
                'id': '42',
                'ext': 'mp4',
                'title': 'Video title goes here',
                'thumbnail': 're:^https?://.*\.jpg$',
                # TODO more properties, either as:
                # * A value
                # * MD5 checksum; start the string with md5:
                # * A regular expression; start the string with re:
                # * Any Python type (for example int or float)
            }
        }

        def _real_extract(self, url):
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)

            # TODO more code goes here, for example ...
            title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')

            return {
                'id': video_id,
                'title': title,
                'description': self._og_search_description(webpage),
                # TODO more properties (see youtube_dl/extractor/common.py)
            }
    ```

5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename `_TEST` to `_TESTS` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files, [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:

        $ git add youtube_dl/extractor/__init__.py
        $ git add youtube_dl/extractor/yourextractor.py
        $ git commit -m '[yourextractor] Add new extractor'
        $ git push origin yourextractor

10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.

In any case, thank you very much for your contributions!
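Taken together, steps 2 through 8 above amount to a short shell session. A sketch for illustration, where `YOUR_GITHUB_USERNAME` and `yourextractor` are the same placeholders used in the list:

```bash
# One-time setup: clone your fork and create a working branch
git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git
cd youtube-dl
git checkout -b yourextractor

# Edit youtube_dl/extractor/yourextractor.py, then iterate on the test
python test/test_download.py TestDownload.test_YourExtractor

# Style check before committing
flake8 youtube_dl/extractor/yourextractor.py
```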
(1-line file deleted)

@@ -1 +0,0 @@
-2012.12.99
MANIFEST.in

@@ -2,5 +2,6 @@ include README.md
 include test/*.py
 include test/*.json
 include youtube-dl.bash-completion
+include youtube-dl.fish
 include youtube-dl.1
 recursive-include docs Makefile conf.py *.rst
Makefile (59 changes)

@@ -1,15 +1,14 @@
-all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
 
 clean:
-	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
-
-cleanall: clean
-	rm -f youtube-dl youtube-dl.exe
-
-PREFIX=/usr/local
-BINDIR=$(PREFIX)/bin
-MANDIR=$(PREFIX)/man
-PYTHON=/usr/bin/env python
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
+	find . -name "*.pyc" -delete
+
+PREFIX ?= /usr/local
+BINDIR ?= $(PREFIX)/bin
+MANDIR ?= $(PREFIX)/man
+SHAREDIR ?= $(PREFIX)/share
+PYTHON ?= /usr/bin/env python
 
 # set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
 ifeq ($(PREFIX),/usr)
@@ -22,23 +21,36 @@ else
 endif
 endif
 
-install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
+install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 	install -d $(DESTDIR)$(BINDIR)
 	install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
 	install -d $(DESTDIR)$(MANDIR)/man1
 	install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
 	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
 	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
+	install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
+	install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
+	install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
+	install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
+
+codetest:
+	flake8 .
 
 test:
 	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
 	nosetests --verbose test
+	$(MAKE) codetest
+
+ot: offlinetest
+
+offlinetest: codetest
+	nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py
 
 tar: youtube-dl.tar.gz
 
-.PHONY: all clean install test tar bash-completion pypi-files
+.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion ot offlinetest codetest supportedsites
 
-pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
+pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
 
 youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 	zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
@@ -49,7 +61,13 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 	chmod a+x youtube-dl
 
 README.md: youtube_dl/*.py youtube_dl/*/*.py
-	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py
+	COLUMNS=80 python youtube_dl/__main__.py --help | python devscripts/make_readme.py
+
+CONTRIBUTING.md: README.md
+	python devscripts/make_contributing.py README.md CONTRIBUTING.md
+
+supportedsites:
+	python devscripts/make_supportedsites.py docs/supportedsites.md
 
 README.txt: README.md
 	pandoc -f markdown -t plain README.md -o README.txt
@@ -64,7 +82,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-co
 
 bash-completion: youtube-dl.bash-completion
 
-youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
+	python devscripts/zsh-completion.py
+
+zsh-completion: youtube-dl.zsh
+
+youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
+	python devscripts/fish-completion.py
+
+fish-completion: youtube-dl.fish
+
+youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 	@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
 		--exclude '*.DS_Store' \
 		--exclude '*.kate-swp' \
@@ -78,5 +106,6 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
 		-- \
 		bin devscripts test youtube_dl docs \
 		LICENSE README.md README.txt \
-		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
+		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
+		youtube-dl.zsh youtube-dl.fish setup.py \
 		youtube-dl
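The new targets slot into the usual development loop. A quick sketch of how they might be invoked from the checkout root, assuming the dependencies listed in CONTRIBUTING.md are installed:

```bash
# Lint only (the new standalone target)
make codetest

# Run the test suite without the network-bound download tests
make offlinetest     # or its shorthand: make ot

# Regenerate the generated docs
make CONTRIBUTING.md
make supportedsites
```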
README.md (557 changes)

@@ -1,7 +1,15 @@
 youtube-dl - download videos from youtube.com or other video platforms
 
-# SYNOPSIS
-**youtube-dl** [OPTIONS] URL [URL...]
+- [INSTALLATION](#installation)
+- [DESCRIPTION](#description)
+- [OPTIONS](#options)
+- [CONFIGURATION](#configuration)
+- [OUTPUT TEMPLATE](#output-template)
+- [VIDEO SELECTION](#video-selection)
+- [FAQ](#faq)
+- [DEVELOPER INSTRUCTIONS](#developer-instructions)
+- [BUGS](#bugs)
+- [COPYRIGHT](#copyright)
 
 # INSTALLATION
@@ -12,181 +20,136 @@ To install it right away for all UNIX users (Linux, OS X, etc.), type:
 
 If you do not have curl, you can alternatively use a recent wget:
 
-    sudo wget https://yt-dl.org/downloads/2014.05.13/youtube-dl -O /usr/local/bin/youtube-dl
+    sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
     sudo chmod a+x /usr/local/bin/youtube-dl
 
 Windows users can [download a .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in their home directory or any other location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
 
+OS X users can install **youtube-dl** with [Homebrew](http://brew.sh/).
+
+    brew install youtube-dl
+
+You can also use pip:
+
+    sudo pip install youtube-dl
+
 Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
 
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
-2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
+2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
 your Unix box, on Windows or on Mac OS X. It is released to the public domain,
 which means you can modify it, redistribute it or use it however you like.
 
+    youtube-dl [OPTIONS] URL [URL...]
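In its simplest form, that synopsis is just youtube-dl plus a video URL. For illustration, using the test video that appears elsewhere in this README:

```bash
# Download a single video into the current directory
youtube-dl http://www.youtube.com/watch?v=BaW_jenozKc
```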
 # OPTIONS
     -h, --help                       print this help text and exit
     --version                        print program version and exit
     -U, --update                     update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)
     -i, --ignore-errors              continue on download errors, for example to skip unavailable videos in a playlist
     --abort-on-error                 Abort downloading of further videos (in the playlist or the command line) if an error occurs
     --dump-user-agent                display the current browser identification
     --list-extractors                List all supported extractors and the URLs they would handle
     --extractor-descriptions         Output descriptions of all supported extractors
     --default-search PREFIX          Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.
     --ignore-config                  Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: Do not read the user configuration in ~/.config/youtube-dl/config (%APPDATA%/youtube-dl/config.txt on Windows)
     --flat-playlist                  Do not extract the videos of a playlist, only list them.
     --no-color                       Do not emit color codes in output.
 
 ## Network Options:
     --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection
     --socket-timeout SECONDS         Time to wait before giving up, in seconds
     --source-address IP              Client-side IP address to bind to (experimental)
     -4, --force-ipv4                 Make all connections via IPv4 (experimental)
     -6, --force-ipv6                 Make all connections via IPv6 (experimental)
     --cn-verification-proxy URL      Use this proxy to verify the IP address for some Chinese sites. The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading. (experimental)
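A couple of the network flags above combined, as a hedged illustration (the proxy address is a placeholder for whatever proxy you actually run):

```bash
# Route traffic through a local HTTP proxy, forcing IPv4 and a 15-second socket timeout
youtube-dl --proxy http://127.0.0.1:3128 -4 --socket-timeout 15 \
    http://www.youtube.com/watch?v=BaW_jenozKc
```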
 ## Video Selection:
     --playlist-start NUMBER          playlist video to start at (default is 1)
     --playlist-end NUMBER            playlist video to end at (default is last)
     --playlist-items ITEM_SPEC       playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can also specify a range: "--playlist-items 1-3,7,10-13" will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
     --match-title REGEX              download only matching titles (regex or caseless sub-string)
     --reject-title REGEX             skip download for matching titles (regex or caseless sub-string)
     --max-downloads NUMBER           Abort after downloading NUMBER files
     --min-filesize SIZE              Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
     --max-filesize SIZE              Do not download any videos larger than SIZE (e.g. 50k or 44.6m)
     --date DATE                      download only videos uploaded in this date
     --datebefore DATE                download only videos uploaded on or before this date (i.e. inclusive)
     --dateafter DATE                 download only videos uploaded on or after this date (i.e. inclusive)
     --min-views COUNT                Do not download any videos with less than COUNT views
     --max-views COUNT                Do not download any videos with more than COUNT views
     --match-filter FILTER            (Experimental) Generic video filter. Specify any key (see help for -o for a list of available keys) to match if the key is present, !key to check if the key is not present, key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against a number, and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) after the operator. For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or for which the dislike functionality is not available at the given service), but that also have a description, use --match-filter "like_count > 100 & dislike_count <? 50 & description" .
     --no-playlist                    If the URL refers to a video and a playlist, download only the video.
     --yes-playlist                   If the URL refers to a video and a playlist, download the playlist.
     --age-limit YEARS                download only videos suitable for the given age
     --download-archive FILE          Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.
     --include-ads                    Download advertisements as well (experimental)
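As a worked example of the selection syntax just described (the playlist URL is a placeholder):

```bash
# Grab items 1-3 and 7 of a playlist, but only videos with more than 100 likes
youtube-dl --playlist-items 1-3,7 \
           --match-filter "like_count > 100" \
           "https://www.youtube.com/playlist?list=PLxxxxxx"
```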
 ## Download Options:
     -r, --rate-limit LIMIT           maximum download rate in bytes per second (e.g. 50K or 4.2M)
     -R, --retries RETRIES            number of retries (default is 10), or "infinite".
     --buffer-size SIZE               size of download buffer (e.g. 1024 or 16K) (default is 1024)
     --no-resize-buffer               do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.
     --playlist-reverse               Download playlist videos in reverse order
     --xattr-set-filesize             (experimental) set file xattribute ytdl.filesize with expected filesize
     --hls-prefer-native              (experimental) Use the native HLS downloader instead of ffmpeg.
     --external-downloader COMMAND    Use the specified external downloader. Currently supports aria2c,curl,wget
     --external-downloader-args ARGS  Give these arguments to the external downloader.
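For illustration, a download tuned with these flags (aria2c must be installed separately for the last one to have any effect):

```bash
# Cap bandwidth at 1 MB/s, retry forever, and hand the transfer to aria2c
youtube-dl -r 1M -R infinite --external-downloader aria2c \
    http://www.youtube.com/watch?v=BaW_jenozKc
```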
 ## Filesystem Options:
     -a, --batch-file FILE            file containing URLs to download ('-' for stdin)
     --id                             use only video ID in file name
     -o, --output TEMPLATE            output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for the format description (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like Youtube's itags: "137"), %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id, %(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, %(playlist_index)s for the position in the playlist. %(height)s and %(width)s for the width and height of the video format. %(resolution)s for a textual description of the resolution of the video format. %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o '/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
     --autonumber-size NUMBER         Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given
     --restrict-filenames             Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames
     -A, --auto-number                [deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] number downloaded files starting from 00000
     -t, --title                      [deprecated] use title in file name (default)
     -l, --literal                    [deprecated] alias of --title
     -w, --no-overwrites              do not overwrite files
     -c, --continue                   force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.
     --no-continue                    do not resume partially downloaded files (restart from beginning)
     --no-part                        do not use .part files - write directly into output file
     --no-mtime                       do not use the Last-modified header to set the file modification time
     --write-description              write video description to a .description file
     --write-info-json                write video metadata to a .info.json file
     --write-annotations              write video annotations to a .annotation file
     --load-info FILE                 json file containing the video information (created with the "--write-json" option)
     --cookies FILE                   file to read cookies from and dump cookie jar in
     --cache-dir DIR                  Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.
     --no-cache-dir                   Disable filesystem caching
     --rm-cache-dir                   Delete all filesystem cache files
 
 ## Thumbnail images:
     --write-thumbnail                write thumbnail image to disk
     --write-all-thumbnails           write all thumbnail image formats to disk
     --list-thumbnails                Simulate and list all available thumbnail formats
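The -o template above is the workhorse for library-style organisation. A small sketch (the directory layout is an arbitrary choice):

```bash
# Sort downloads into per-uploader folders with filesystem-safe names
youtube-dl -o '~/videos/%(uploader)s/%(title)s-%(id)s.%(ext)s' \
           --restrict-filenames --write-info-json \
           http://www.youtube.com/watch?v=BaW_jenozKc
```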
 ## Verbosity / Simulation Options:
     -q, --quiet                      activates quiet mode
     --no-warnings                    Ignore warnings
     -s, --simulate                   do not download the video and do not write anything to disk
     --skip-download                  do not download the video
     -g, --get-url                    simulate, quiet but print URL
     -e, --get-title                  simulate, quiet but print title
@@ -196,86 +159,91 @@ which means you can modify it, redistribute it or use it however you like.
     --get-duration                   simulate, quiet but print video length
     --get-filename                   simulate, quiet but print output filename
     --get-format                     simulate, quiet but print output format
     -j, --dump-json                  simulate, quiet but print JSON information. See --output for a description of available keys.
     -J, --dump-single-json           simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.
     --print-json                     Be quiet and print the video information as JSON (video is still being downloaded).
     --newline                        output progress bar as new lines
     --no-progress                    do not print progress bar
     --console-title                  display progress in console titlebar
     -v, --verbose                    print various debugging information
     --dump-pages                     print downloaded pages to debug problems (very verbose)
     --write-pages                    Write downloaded intermediary pages to files in the current directory to debug problems
     --print-traffic                  Display sent and read HTTP traffic
     -C, --call-home                  Contact the youtube-dl server for debugging.
     --no-call-home                   Do NOT contact the youtube-dl server for debugging.
 
 ## Workarounds:
     --encoding ENCODING              Force the specified encoding (experimental)
     --no-check-certificate           Suppress HTTPS certificate validation.
     --prefer-insecure                Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)
     --user-agent UA                  specify a custom user agent
     --referer URL                    specify a custom referer, use if the video access is restricted to one domain
     --add-header FIELD:VALUE         specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times
     --bidi-workaround                Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH
     --sleep-interval SECONDS         Number of seconds to sleep before each download.
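These simulation flags compose nicely in scripts. A hedged sketch (jq is an external JSON tool, used here purely for illustration):

```bash
# Print the direct media URL without downloading anything
youtube-dl -g http://www.youtube.com/watch?v=BaW_jenozKc

# Dump metadata as JSON and pull out the title
youtube-dl -j http://www.youtube.com/watch?v=BaW_jenozKc | jq -r .title
```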
 ## Video Format Options:
     -f, --format FORMAT              video format code, specify the order of preference using slashes, as in -f 22/17/18 . Instead of format codes, you can select by extension for the extensions aac, m4a, mp3, mp4, ogg, wav, webm. You can also use the special names "best", "bestvideo", "bestaudio", "worst". You can filter the video results by putting a condition in brackets, as in -f "best[height=720]" (or -f "[filesize>10M]"). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so -f "[height <=? 720][tbr>500]" selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio. You can merge the video and audio of two formats into a single file using -f <video-format>+<audio-format> (requires ffmpeg or avconv), for example -f bestvideo+bestaudio.
     --all-formats                    download all available video formats
     --prefer-free-formats            prefer free video formats unless a specific one is requested
     --max-quality FORMAT             highest quality format to download
     -F, --list-formats               list all available formats
     --youtube-skip-dash-manifest     Do not download the DASH manifest on YouTube videos
     --merge-output-format FORMAT     If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no merge is required
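Putting the filter and merge syntax together, something like the following (a sketch; ffmpeg or avconv must be on PATH for the merge, as noted above):

```bash
# List what is available first
youtube-dl -F http://www.youtube.com/watch?v=BaW_jenozKc

# Best video no taller than 720p plus best audio, merged into an MKV
youtube-dl -f 'bestvideo[height<=?720]+bestaudio' --merge-output-format mkv \
    http://www.youtube.com/watch?v=BaW_jenozKc
```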
 ## Subtitle Options:
     --write-sub                      write subtitle file
     --write-auto-sub                 write automatic subtitle file (youtube only)
     --all-subs                       downloads all the available subtitles of the video
     --list-subs                      lists all available subtitles for the video
     --sub-format FORMAT              subtitle format, accepts formats preference, for example: "ass/srt/best"
     --sub-lang LANGS                 languages of the subtitles to download (optional) separated by commas, use IETF language tags like 'en,pt'
 
 ## Authentication Options:
     -u, --username USERNAME          login with this account ID
     -p, --password PASSWORD          account password. If this option is left out, youtube-dl will ask interactively.
     -2, --twofactor TWOFACTOR        two-factor auth code
     -n, --netrc                      use .netrc authentication data
     --video-password PASSWORD        video password (vimeo, smotri)
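For instance, fetching English and Portuguese subtitles alongside the video (the URL is again the README's test video):

```bash
# Inspect the available subtitle tracks, then download two of them as SRT
youtube-dl --list-subs http://www.youtube.com/watch?v=BaW_jenozKc
youtube-dl --write-sub --sub-lang en,pt --sub-format srt \
    http://www.youtube.com/watch?v=BaW_jenozKc
```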
 ## Post-processing Options:
     -x, --extract-audio              convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)
     --audio-format FORMAT            "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "best" by default
     --audio-quality QUALITY          ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)
     --recode-video FORMAT            Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)
     -k, --keep-video                 keeps the video file on disk after the post-processing; the video is erased by default
     --no-post-overwrites             do not overwrite post-processed files; the post-processed files are overwritten by default
     --embed-subs                     embed subtitles in the video (only for mp4 videos)
     --embed-thumbnail                embed thumbnail in the audio as cover art
     --add-metadata                   write metadata to the video file
     --metadata-from-title FORMAT     parse additional metadata like song title / artist from the video title. The format syntax is the same as --output, the parsed parameters replace existing values. Additional templates: %(album), %(artist). Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like "Coldplay - Paradise"
     --xattrs                         write metadata to the video file's xattrs (using dublin core and xdg standards)
     --fixup POLICY                   Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn (the default; fix the file if we can, warn otherwise)
     --prefer-avconv                  Prefer avconv over ffmpeg for running the postprocessors (default)
     --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the postprocessors
     --ffmpeg-location PATH           Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.
     --exec CMD                       Execute a command on the file after downloading, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm {}'
     --convert-subtitles FORMAT       Convert the subtitles to other format (currently supported: srt|ass|vtt)
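A typical audio-ripping invocation built from the flags above (ffmpeg or avconv required, as noted):

```bash
# Extract a VBR MP3 at the best quality setting and tag it from the title
youtube-dl -x --audio-format mp3 --audio-quality 0 \
           --metadata-from-title "%(artist)s - %(title)s" \
           http://www.youtube.com/watch?v=BaW_jenozKc
```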
 # CONFIGURATION
 
-You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<Yourname>\youtube-dl.conf`.
+You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<user name>\youtube-dl.conf`.
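Concretely, a minimal per-user configuration might look like this (the file path is from the paragraph above; the option choices are arbitrary examples):

```bash
# Create ~/.config/youtube-dl/config with a couple of defaults
mkdir -p ~/.config/youtube-dl
cat > ~/.config/youtube-dl/config <<'EOF'
--restrict-filenames
-o ~/videos/%(uploader)s/%(title)s-%(id)s.%(ext)s
EOF
```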
# OUTPUT TEMPLATE

The current default template is `%(title)s-%(id)s.%(ext)s`.

In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:

```bash
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.mp4    # All kinds of weird characters
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4          # A simple file name
```
# VIDEO SELECTION

Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`.

Examples:

```bash
# Download only the videos uploaded in the last 6 months
$ youtube-dl --dateafter now-6months

$ # will only download the videos uploaded in the 200x decade
$ youtube-dl --dateafter 20000101 --datebefore 20091231
```
# FAQ

### How do I update youtube-dl?

If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).

If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.

If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.

As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like

    sudo apt-get remove -y youtube-dl

Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):

```
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
sudo chmod a+x /usr/local/bin/youtube-dl
hash -r
```

Again, from then on you'll be able to update with `sudo youtube-dl -U`.

### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists

YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.

If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.

### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?

By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.

### Can you please put the -b option back?

Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
### I get HTTP error 402 when trying to download a video. What's this?

Apparently YouTube requires you to pass a CAPTCHA test if you download too much.

### I have downloaded a video but how can I play it?

Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).

### I extracted a video URL with -g, but it does not play on another machine / in my webbrowser.

It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.

It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule.

Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using -g, your own downloader must support these as well.

If you want to play the video on a machine that is not running youtube-dl, you can relay the video content from the machine that runs youtube-dl. You can use `-o -` to let youtube-dl stream a video to stdout, or simply allow the player to download the files written by youtube-dl in turn.
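For instance, assuming a player that can read from standard input (VLC accepts `-` for stdin), such a relay could look like this sketch:

```bash
# Stream the download to stdout and pipe it straight into a local player
$ youtube-dl -o - 'http://www.youtube.com/watch?v=BaW_jenozKc' | vlc -
```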
### ERROR: no fmt_url_map or conn information found in video info

YouTube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.

### ERROR: unable to download video ###

YouTube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.

### ExtractorError: Could not find JS function u'OF'

In February 2015, the new YouTube player contained a character sequence in a string that was misinterpreted by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.

### HTTP Error 429: Too Many Requests or 402: Payment Required

These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--source-address` options](#network-options) to select another IP address.

### SyntaxError: Non-ASCII character ###

The error

    File "youtube-dl", line 2
    SyntaxError: Non-ASCII character '\x93' ...

means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.

### What is this binary file? Where has the source code gone?

Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above.

### The exe throws a *Runtime error from Visual C++*

To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29).

### On Windows, how should I set up ffmpeg and youtube-dl? Where should I put the exe files?

If you put youtube-dl and ffmpeg in the same directory that you're running the command from, it will work, but that's rather cumbersome.

To make a different directory work - either for ffmpeg, or for youtube-dl, or for both - simply create the directory (say, `C:\bin`, or `C:\Users\<User name>\bin`), put all the executables directly in there, and then [set your PATH environment variable](https://www.java.com/en/download/help/path.xml) to include that directory.

From then on, after restarting your shell, you will be able to access both youtube-dl and ffmpeg (and youtube-dl will be able to find ffmpeg) by simply typing `youtube-dl` or `ffmpeg`, no matter what directory you're in.
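One of several ways to append to the PATH from a Command Prompt is the `setx` utility; a sketch, assuming the `C:\bin` directory from above (note that `setx` stores the merged value permanently and only affects newly started shells):

```
setx PATH "%PATH%;C:\bin"
```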
### How do I put downloads into a specific folder?

Use the `-o` option to specify an [output template](#output-template), for example `-o "/home/user/videos/%(title)s-%(id)s.%(ext)s"`. If you want this for all of your downloads, put the option into your [configuration file](#configuration).

### How do I download a video starting with a `-`?

Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the options with `--`:

    youtube-dl -- -wNyEUrxzFU
    youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"

### Can you add support for this anime video site, or site which shows current movies for free?

As a matter of policy (as well as legality), youtube-dl does not include support for services that specialize in infringing copyright. As a rule of thumb, if you cannot easily find a video that the service is quite obviously allowed to distribute (i.e. that has been uploaded by the creator, the creator's distributor, or is published under a free license), the service is probably unfit for inclusion in youtube-dl.

A note on the service that they don't host the infringing content, but just link to those who do, is evidence that the service should **not** be included into youtube-dl. The same goes for any DMCA note when the whole front page of the service is filled with videos they are not allowed to distribute. A "fair use" note is equally unconvincing if the service shows copyright-protected videos in full without authorization.

Support requests for services that **do** purchase the rights to distribute their content are perfectly fine though. If in doubt, you can simply include a source that mentions the legitimate purchase of content.

### How can I speed up work on my issue?

(Also known as: Help, my important issue is not being solved!) The youtube-dl core developer team is quite small. While we do our best to solve as many issues as possible, sometimes that can take quite a while. To speed up your issue, here's what you can do:

First of all, please do report the issue [at our issue tracker](https://yt-dl.org/bugs). That allows us to coordinate all efforts by users and developers, and serves as a unified point. Unfortunately, the youtube-dl project has grown too large to use personal email as an effective communication channel.

Please read the [bug reporting instructions](#bugs) below. A lot of bugs lack all the necessary information. If you can, offer proxy, VPN, or shell access to the youtube-dl developers. If you are able to, test the issue from multiple computers in multiple countries to exclude local censorship or misconfiguration issues.

If nobody is interested in solving your issue, you are welcome to take matters into your own hands and submit a pull request (or coerce/pay somebody else to do so).

Feel free to bump the issue from time to time by writing a small comment ("Issue is still present in youtube-dl version ... from France, but fixed from Belgium"), but please not more than once a month. Please do not declare your issue as `important` or `urgent`.

### How can I detect whether a given URL is supported by youtube-dl?

For one, have a look at the [list of supported sites](docs/supportedsites.md). Note that it can sometimes happen that the site changes its URL scheme (say, from http://example.com/video/1234567 to http://example.com/v/1234567 ) and youtube-dl reports a URL of a service in that list as unsupported. In that case, simply report a bug.

It is *not* possible to detect whether a URL is supported or not. That's because youtube-dl contains a generic extractor which matches **all** URLs. You may be tempted to disable, exclude, or remove the generic extractor, but the generic extractor not only allows users to extract videos from lots of websites that embed a video from another service, but may also be used to extract video that a service hosts itself. Therefore, we neither recommend nor support disabling, excluding, or removing the generic extractor.

If you want to find out whether a given URL is supported, simply call youtube-dl with it. If you get no videos back, chances are the URL is either not referring to a video or unsupported. You can find out which by examining the output (if you run youtube-dl on the console) or catching an `UnsupportedError` exception if you run it from a Python program.
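A minimal sketch of the latter, assuming the `youtube_dl` package is importable (note that, depending on the version, the `UnsupportedError` may also arrive wrapped in a `DownloadError` by the error reporting):

```python
from __future__ import unicode_literals

import youtube_dl
from youtube_dl.utils import DownloadError, UnsupportedError


def is_supported(url):
    # A quiet, simulate-only instance: nothing is downloaded or written
    ydl = youtube_dl.YoutubeDL({'quiet': True, 'simulate': True})
    try:
        ydl.extract_info(url, download=False)
        return True
    except UnsupportedError:
        return False
    except DownloadError:
        # Extraction failed for another reason (network trouble, broken page,
        # or a wrapped UnsupportedError); inspect the message if you need to
        # tell these cases apart.
        return False
```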
# DEVELOPER INSTRUCTIONS

Most users do not need to build youtube-dl and can [download the builds](http://rg3.github.io/youtube-dl/download.html) or get them from their distribution.

If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):

1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:

    ```python
    # coding: utf-8
    from __future__ import unicode_literals

    from .common import InfoExtractor


    class YourExtractorIE(InfoExtractor):
        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
        _TEST = {
            'url': 'http://yourextractor.com/watch/42',
            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
            'info_dict': {
                'id': '42',
                'ext': 'mp4',
                'title': 'Video title goes here',
                'thumbnail': 're:^https?://.*\.jpg$',
                # TODO more properties, either as:
                # * A value
                # * MD5 checksum; start the string with md5:
                # * A regular expression; start the string with re:
                # * Any Python type (for example int or float)
            }
        }

        def _real_extract(self, url):
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)

            # TODO more code goes here, for example ...
            title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')

            return {
                'id': video_id,
                'title': title,
                'description': self._og_search_description(webpage),
                # TODO more properties (see youtube_dl/extractor/common.py)
            }
    ```
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename `_TEST` to `_TESTS` and make it into a list of dictionaries (see the sketch after this list). The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:

        $ git add youtube_dl/extractor/__init__.py
        $ git add youtube_dl/extractor/yourextractor.py
        $ git commit -m '[yourextractor] Add new extractor'
        $ git push origin yourextractor

In any case, thank you very much for your contributions!
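For step 6, a sketch of what a multi-test `_TESTS` attribute could look like (the URLs are hypothetical; the second entry uses the test suite's `only_matching` flag to check URL recognition without downloading anything):

```python
_TESTS = [{
    'url': 'http://yourextractor.com/watch/42',
    'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
    'info_dict': {
        'id': '42',
        'ext': 'mp4',
        'title': 'Video title goes here',
    },
}, {
    # Only check that the URL is accepted by _VALID_URL; no download is run
    'url': 'http://yourextractor.com/embed/42',
    'only_matching': True,
}]
```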
# EMBEDDING YOUTUBE-DL

youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).

From a Python program, you can embed youtube-dl in a more powerful fashion, like this:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```

Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.

Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:

```python
from __future__ import unicode_literals
import youtube_dl


class MyLogger(object):
    def debug(self, msg):
        pass

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)


def my_hook(d):
    if d['status'] == 'finished':
        print('Done downloading, now converting ...')


ydl_opts = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192',
    }],
    'logger': MyLogger(),
    'progress_hooks': [my_hook],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```
# BUGS

Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode.

**Please include the full output of youtube-dl when run with `-v`**.

The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.

Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):

### Is the description of the issue itself sufficient?

If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it.

For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.

If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
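Put together, a diagnostic run might look like this sketch (the URL stands in for the one that fails for you):

```bash
$ youtube-dl -v --call-home --dump-pages 'http://example.com/video/1234567' >log.txt 2>&1
```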
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.

### Are you using the latest version?

### Does the issue involve one problem, and one problem only?

In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology).

### Is anyone going to need the feature?

Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.

### Is your question about youtube-dl?
devscripts/bash-completion.py:

```python
#!/usr/bin/env python
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

# ...

BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"


def build_completion(opt_parser):
    opts_flag = []
    for group in opt_parser.option_groups:
        # ...
```
devscripts/buildserver.py:

```python
#==============================================================================


class BuildError(Exception):
    def __init__(self, output, code=500):
        self.output = output
```
devscripts/check-porn.py:

```python
#!/usr/bin/env python
from __future__ import unicode_literals

"""
This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
...
"""

# ...

for test in get_testcases():
    if METHOD == 'EURISTIC':
        try:
            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
        except Exception:
            print('\nFail: {0}'.format(test['name']))
            continue

    # ...

    RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)

    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or
                   test['info_dict']['age_limit'] != 18):
        print('\nPotential missing age_limit check: {0}'.format(test['name']))

    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and
                         test['info_dict']['age_limit'] == 18):
        print('\nPotential false negative: {0}'.format(test['name']))

    else:
        # ...
```
devscripts/fish-completion.in (new file, 5 lines):

```
{{commands}}


complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
```
devscripts/fish-completion.py (new executable file, 48 lines):

```python
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import os
from os.path import dirname as dirn
import sys

sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from youtube_dl.utils import shell_quote

FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'

EXTRA_ARGS = {
    'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],

    # Options that need a file parameter
    'download-archive': ['--require-parameter'],
    'cookies': ['--require-parameter'],
    'load-info': ['--require-parameter'],
    'batch-file': ['--require-parameter'],
}


def build_completion(opt_parser):
    commands = []

    for group in opt_parser.option_groups:
        for option in group.option_list:
            long_option = option.get_opt_string().strip('-')
            complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
            if option._short_opts:
                complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
            if option.help != optparse.SUPPRESS_HELP:
                complete_cmd += ['--description', option.help]
            complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
            commands.append(shell_quote(complete_cmd))

    with open(FISH_COMPLETION_TEMPLATE) as f:
        template = f.read()
    filled_template = template.replace('{{commands}}', '\n'.join(commands))
    with open(FISH_COMPLETION_FILE, 'w') as f:
        f.write(filled_template)

parser = youtube_dl.parseOpts()[0]
build_completion(parser)
```
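A usage note: the generator reads its template via the relative path `devscripts/fish-completion.in`, so it is presumably meant to be run from the repository root, e.g. `python devscripts/fish-completion.py`, which writes `youtube-dl.fish` into the current directory.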
devscripts/generate_aes_testdata.py (new file, 42 lines):

```python
from __future__ import unicode_literals

import codecs
import subprocess

import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.utils import intlist_to_bytes
from youtube_dl.aes import aes_encrypt, key_expansion

secret_msg = b'Secret message goes here'


def hex_str(int_list):
    return codecs.encode(intlist_to_bytes(int_list), 'hex')


def openssl_encode(algo, key, iv):
    cmd = ['openssl', 'enc', '-e', '-' + algo, '-K', hex_str(key), '-iv', hex_str(iv)]
    prog = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = prog.communicate(secret_msg)
    return out

iv = key = [0x20, 0x15] + 14 * [0]

r = openssl_encode('aes-128-cbc', key, iv)
print('aes_cbc_decrypt')
print(repr(r))

password = key
new_key = aes_encrypt(password, key_expansion(password))
r = openssl_encode('aes-128-ctr', new_key, iv)
print('aes_decrypt_text 16')
print(repr(r))

password = key + 16 * [0]
new_key = aes_encrypt(password, key_expansion(password)) * (32 // 16)
r = openssl_encode('aes-256-ctr', new_key, iv)
print('aes_decrypt_text 32')
print(repr(r))
```
devscripts/gh-pages/add-version.py:

```python
#!/usr/bin/env python3
from __future__ import unicode_literals

import json
import sys
# ...
```
devscripts/gh-pages/generate-download.py:

```python
#!/usr/bin/env python3
from __future__ import unicode_literals

import hashlib
import urllib.request
import json
# ...
```
devscripts/gh-pages/sign-versions.py:

```python
#!/usr/bin/env python3
from __future__ import unicode_literals, with_statement

import rsa
import json

# ...

print('signature: ' + signature)

versions_info['signature'] = signature
with open('update/versions.json', 'w') as versionsf:
    json.dump(versions_info, versionsf, indent=4, sort_keys=True)
```
devscripts/gh-pages/update-copyright.py:

```python
#!/usr/bin/env python
# coding: utf-8

from __future__ import with_statement, unicode_literals

import datetime
import glob
# ...

year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
    with io.open(fn, encoding='utf-8') as f:
        content = f.read()
    newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
    if content != newc:
        tmpFn = fn + '.part'
        with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
            # ...
```
devscripts/gh-pages/update-feed.py:

```python
#!/usr/bin/env python3
from __future__ import unicode_literals

import datetime
import io
# ...

atom_template = atom_template.replace('@ENTRIES@', entries_str)

with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
    atom_file.write(atom_template)
```
devscripts/gh-pages/update-sites.py:

```python
#!/usr/bin/env python3
from __future__ import unicode_literals

import sys
import os
# ...
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import youtube_dl


def main():
    with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
        template = tmplf.read()

    ie_htmls = []
    for ie in youtube_dl.list_extractors(age_limit=None):
        ie_html = '<b>{}</b>'.format(ie.IE_NAME)
        ie_desc = getattr(ie, 'IE_DESC', None)
        if ie_desc is False:
            continue
        elif ie_desc is not None:
            ie_html += ': {}'.format(ie.IE_DESC)
        if not ie.working():
            ie_html += ' (Currently broken)'
        ie_htmls.append('<li>{}</li>'.format(ie_html))

    # ...
```
devscripts/make_contributing.py (new executable file, 32 lines):

```python
#!/usr/bin/env python
from __future__ import unicode_literals

import io
import optparse
import re


def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error('Expected an input and an output filename')

    infile, outfile = args

    with io.open(infile, encoding='utf-8') as inf:
        readme = inf.read()

    bug_text = re.search(
        r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
    dev_text = re.search(
        r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING YOUTUBE-DL',
        readme).group(1)

    out = bug_text + dev_text

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(out)

if __name__ == '__main__':
    main()
```
devscripts/make_readme.py:

```python
from __future__ import unicode_literals

import io
import sys
import re
# ...
```
devscripts/make_supportedsites.py (new file, 45 lines):

```python
#!/usr/bin/env python
from __future__ import unicode_literals

import io
import optparse
import os
import sys


# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(ROOT_DIR)
import youtube_dl


def main():
    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Expected an output filename')

    outfile, = args

    def gen_ies_md(ies):
        for ie in ies:
            ie_md = '**{0}**'.format(ie.IE_NAME)
            ie_desc = getattr(ie, 'IE_DESC', None)
            if ie_desc is False:
                continue
            if ie_desc is not None:
                ie_md += ': {0}'.format(ie.IE_DESC)
            if not ie.working():
                ie_md += ' (Currently broken)'
            yield ie_md

    ies = sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower())
    out = '# Supported sites\n' + ''.join(
        ' - ' + md + '\n'
        for md in gen_ies_md(ies))

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(out)

if __name__ == '__main__':
    main()
```
devscripts/prepare_manpage.py:

```python
from __future__ import unicode_literals

import io
import os.path
# ...

README_FILE = os.path.join(ROOT_DIR, 'README.md')
with io.open(README_FILE, encoding='utf-8') as f:
    readme = f.read()

PREFIX = '''%YOUTUBE-DL(1)

# NAME

youtube\-dl \- download videos from youtube.com or other video platforms

# SYNOPSIS

**youtube-dl** \[OPTIONS\] URL [URL...]

'''
readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
readme = PREFIX + readme

if sys.version_info < (3, 0):
    # ...
```
devscripts/release.sh:

```bash
# ...
if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi

/bin/echo -e "\n### First of all, testing..."
make clean
if $skip_tests ; then
    echo 'SKIPPING TESTS'
else
    # ...
fi

/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py

/bin/echo -e "\n### Committing documentation and youtube_dl/version.py..."
make README.md CONTRIBUTING.md supportedsites
git add README.md CONTRIBUTING.md docs/supportedsites.md youtube_dl/version.py
git commit -m "release $version"

/bin/echo -e "\n### Now tagging, signing and pushing..."

# ...

RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)

/bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done
```
devscripts/transition_helper.py (deleted, 40 lines):

```python
#!/usr/bin/env python

import sys, os

try:
    import urllib.request as compat_urllib_request
except ImportError:  # Python 2
    import urllib2 as compat_urllib_request

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')

try:
    raw_input()
except NameError:  # Python 3
    input()

filename = sys.argv[0]

API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

try:
    urlh = compat_urllib_request.urlopen(BIN_URL)
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

try:
    with open(filename, 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
```
devscripts/transition_helper_exe/setup.py (deleted, 12 lines):

```python
from distutils.core import setup
import py2exe

py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe']
}

setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
```
devscripts/transition_helper_exe/youtube-dl.py (deleted, 102 lines):

```python
#!/usr/bin/env python

import sys, os
import urllib2
import json, hashlib

def rsa_verify(message, signature, key):
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')

raw_input()

filename = sys.argv[0]

UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % directory)

try:
    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
    versions_info = json.loads(versions_info)
except:
    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')

version = versions_info['versions'][versions_info['latest']]

try:
    urlh = urllib2.urlopen(version['exe'][0])
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')

try:
    with open(exe + '.new', 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit(u'ERROR: unable to write the new version')

try:
    bat = os.path.join(directory, 'youtube-dl-updater.bat')
    b = open(bat, 'w')
    b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" %(exe, exe, bat))
    b.close()

    os.startfile(bat)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
```
devscripts/zsh-completion.in (new file, 28 lines):

```
#compdef youtube-dl

__youtube_dl() {
    local curcontext="$curcontext" fileopts diropts cur prev
    typeset -A opt_args
    fileopts="{{fileopts}}"
    diropts="{{diropts}}"
    cur=$words[CURRENT]
    case $cur in
        :)
            _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
        ;;
        *)
            prev=$words[CURRENT-1]
            if [[ ${prev} =~ ${fileopts} ]]; then
                _path_files
            elif [[ ${prev} =~ ${diropts} ]]; then
                _path_files -/
            elif [[ ${prev} == "--recode-video" ]]; then
                _arguments '*: :(mp4 flv ogg webm mkv)'
            else
                _arguments '*: :({{flags}})'
            fi
        ;;
    esac
}

__youtube_dl
```
devscripts/zsh-completion.py (new executable file, 48 lines):

```python
#!/usr/bin/env python
from __future__ import unicode_literals

import os
from os.path import dirname as dirn
import sys

sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl

ZSH_COMPLETION_FILE = "youtube-dl.zsh"
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"


def build_completion(opt_parser):
    opts = [opt for group in opt_parser.option_groups
            for opt in group.option_list]
    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]

    fileopts = []
    for opt in opts_file:
        if opt._short_opts:
            fileopts.extend(opt._short_opts)
        if opt._long_opts:
            fileopts.extend(opt._long_opts)

    diropts = []
    for opt in opts_dir:
        if opt._short_opts:
            diropts.extend(opt._short_opts)
        if opt._long_opts:
            diropts.extend(opt._long_opts)

    flags = [opt.get_opt_string() for opt in opts]

    with open(ZSH_COMPLETION_TEMPLATE) as f:
        template = f.read()

    template = template.replace("{{fileopts}}", "|".join(fileopts))
    template = template.replace("{{diropts}}", "|".join(diropts))
    template = template.replace("{{flags}}", " ".join(flags))

    with open(ZSH_COMPLETION_FILE, "w") as f:
        f.write(template)

parser = youtube_dl.parseOpts()[0]
build_completion(parser)
```
docs/conf.py:

```python
copyright = u'2014, Ricardo Garcia Gonzalez'
# ...
# built documents.
#
# The short X.Y version.
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
```
607  docs/supportedsites.md  Normal file
@@ -0,0 +1,607 @@
# Supported sites
 - **1tv**: Первый канал
 - **1up.com**
 - **220.ro**
 - **22tracks:genre**
 - **22tracks:track**
 - **24video**
 - **3sat**
 - **4tube**
 - **56.com**
 - **5min**
 - **8tracks**
 - **9gag**
 - **abc.net.au**
 - **Abc7News**
 - **AcademicEarth:Course**
 - **AddAnime**
 - **AdobeTV**
 - **AdultSwim**
 - **Aftenposten**
 - **Aftonbladet**
 - **AirMozilla**
 - **AlJazeera**
 - **Allocine**
 - **AlphaPorno**
 - **anitube.se**
 - **AnySex**
 - **Aparat**
 - **AppleDailyAnimationNews**
 - **AppleDailyRealtimeNews**
 - **AppleTrailers**
 - **archive.org**: archive.org videos
 - **ARD**
 - **ARD:mediathek**
 - **arte.tv**
 - **arte.tv:+7**
 - **arte.tv:concert**
 - **arte.tv:creative**
 - **arte.tv:ddc**
 - **arte.tv:embed**
 - **arte.tv:future**
 - **AtresPlayer**
 - **ATTTechChannel**
 - **audiomack**
 - **audiomack:album**
 - **Azubu**
 - **bambuser**
 - **bambuser:channel**
 - **Bandcamp**
 - **Bandcamp:album**
 - **bbc.co.uk**: BBC iPlayer
 - **BeatportPro**
 - **Beeg**
 - **BehindKink**
 - **Bet**
 - **Bild**: Bild.de
 - **BiliBili**
 - **blinkx**
 - **blip.tv:user**
 - **BlipTV**
 - **Bloomberg**
 - **Bpb**: Bundeszentrale für politische Bildung
 - **BR**: Bayerischer Rundfunk Mediathek
 - **Break**
 - **Brightcove**
 - **BuzzFeed**
 - **BYUtv**
 - **Camdemy**
 - **CamdemyFolder**
 - **Canal13cl**
 - **canalc2.tv**
 - **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
 - **CBS**
 - **CBSNews**: CBS News
 - **CBSSports**
 - **CeskaTelevize**
 - **channel9**: Channel 9
 - **Chilloutzone**
 - **chirbit**
 - **chirbit:profile**
 - **Cinchcast**
 - **Cinemassacre**
 - **clipfish**
 - **cliphunter**
 - **Clipsyndicate**
 - **Cloudy**
 - **Clubic**
 - **cmt.com**
 - **CNET**
 - **CNN**
 - **CNNArticle**
 - **CNNBlogs**
 - **CollegeHumor**
 - **CollegeRama**
 - **ComCarCoff**
 - **ComedyCentral**
 - **ComedyCentralShows**: The Daily Show / The Colbert Report
 - **CondeNast**: Condé Nast media group: Condé Nast, GQ, Glamour, Vanity Fair, Vogue, W Magazine, WIRED
 - **Cracked**
 - **Criterion**
 - **Crunchyroll**
 - **crunchyroll:playlist**
 - **CSpan**: C-SPAN
 - **CtsNews**
 - **culturebox.francetvinfo.fr**
 - **dailymotion**
 - **dailymotion:playlist**
 - **dailymotion:user**
 - **daum.net**
 - **DBTV**
 - **DctpTv**
 - **DeezerPlaylist**
 - **defense.gouv.fr**
 - **DHM**: Filmarchiv - Deutsches Historisches Museum
 - **Discovery**
 - **divxstage**: DivxStage
 - **Dotsub**
 - **DouyuTV**
 - **DRBonanza**
 - **Dropbox**
 - **DrTuber**
 - **DRTV**
 - **Dump**
 - **Dumpert**
 - **dvtv**: http://video.aktualne.cz/
 - **EaglePlatform**
 - **EbaumsWorld**
 - **EchoMsk**
 - **eHow**
 - **Einthusan**
 - **eitb.tv**
 - **EllenTV**
 - **EllenTV:clips**
 - **ElPais**: El País
 - **Embedly**
 - **EMPFlix**
 - **Engadget**
 - **Eporner**
 - **EroProfile**
 - **Escapist**
 - **EveryonesMixtape**
 - **exfm**: ex.fm
 - **ExpoTV**
 - **ExtremeTube**
 - **facebook**
 - **faz.net**
 - **fc2**
 - **fernsehkritik.tv**
 - **fernsehkritik.tv:postecke**
 - **Firedrive**
 - **Firstpost**
 - **Flickr**
 - **Folketinget**: Folketinget (ft.dk; Danish parliament)
 - **FootyRoom**
 - **Foxgay**
 - **FoxNews**
 - **france2.fr:generation-quoi**
 - **FranceCulture**
 - **FranceInter**
 - **francetv**: France 2, 3, 4, 5 and Ô
 - **francetvinfo.fr**
 - **Freesound**
 - **freespeech.org**
 - **FreeVideo**
 - **FunnyOrDie**
 - **Gamekings**
 - **GameOne**
 - **gameone:playlist**
 - **GameSpot**
 - **GameStar**
 - **Gametrailers**
 - **Gazeta**
 - **GDCVault**
 - **generic**: Generic downloader that works on some sites
 - **GiantBomb**
 - **Giga**
 - **Glide**: Glide mobile video messages (glide.me)
 - **Globo**
 - **GodTube**
 - **GoldenMoustache**
 - **Golem**
 - **GorillaVid**: GorillaVid.in, daclips.in, movpod.in and fastvideo.in
 - **Goshgay**
 - **Grooveshark**
 - **Groupon**
 - **Hark**
 - **HearThisAt**
 - **Heise**
 - **HellPorno**
 - **Helsinki**: helsinki.fi
 - **HentaiStigma**
 - **HistoricFilms**
 - **History**
 - **hitbox**
 - **hitbox:live**
 - **HornBunny**
 - **HostingBulk**
 - **HotNewHipHop**
 - **Howcast**
 - **HowStuffWorks**
 - **HuffPost**: Huffington Post
 - **Hypem**
 - **Iconosquare**
 - **ign.com**
 - **imdb**: Internet Movie Database trailers
 - **imdb:list**: Internet Movie Database lists
 - **Imgur**
 - **Ina**
 - **InfoQ**
 - **Instagram**
 - **instagram:user**: Instagram user profile
 - **InternetVideoArchive**
 - **IPrima**
 - **ivi**: ivi.ru
 - **ivi:compilation**: ivi.ru compilations
 - **Izlesene**
 - **JadoreCettePub**
 - **JeuxVideo**
 - **Jove**
 - **jpopsuki.tv**
 - **Jukebox**
 - **Kaltura**
 - **KanalPlay**: Kanal 5/9/11 Play
 - **Kankan**
 - **Karaoketv**
 - **keek**
 - **KeezMovies**
 - **KhanAcademy**
 - **KickStarter**
 - **kontrtube**: KontrTube.ru - Труба зовёт
 - **KrasView**: Красвью
 - **Ku6**
 - **la7.tv**
 - **Laola1Tv**
 - **Letv**
 - **LetvPlaylist**
 - **LetvTv**
 - **Libsyn**
 - **lifenews**: LIFE | NEWS
 - **LiveLeak**
 - **livestream**
 - **livestream:original**
 - **LnkGo**
 - **lrt.lt**
 - **lynda**: lynda.com videos
 - **lynda:course**: lynda.com online courses
 - **m6**
 - **macgamestore**: MacGameStore trailers
 - **mailru**: Видео@Mail.Ru
 - **Malemotion**
 - **MDR**
 - **media.ccc.de**
 - **metacafe**
 - **Metacritic**
 - **Mgoon**
 - **Minhateca**
 - **MinistryGrid**
 - **miomio.tv**
 - **mitele.es**
 - **mixcloud**
 - **MLB**
 - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
 - **Mofosex**
 - **Mojvideo**
 - **Moniker**: allmyvideos.net and vidspot.net
 - **mooshare**: Mooshare.biz
 - **Morningstar**: morningstar.com
 - **Motherless**
 - **Motorsport**: motorsport.com
 - **MovieClips**
 - **Moviezine**
 - **movshare**: MovShare
 - **MPORA**
 - **MTV**
 - **mtviggy.com**
 - **mtvservices:embedded**
 - **MuenchenTV**: münchen.tv
 - **MusicPlayOn**
 - **MusicVault**
 - **muzu.tv**
 - **MySpace**
 - **MySpace:album**
 - **MySpass**
 - **myvideo**
 - **MyVidster**
 - **n-tv.de**
 - **NationalGeographic**
 - **Naver**
 - **NBA**
 - **NBC**
 - **NBCNews**
 - **NBCSports**
 - **NBCSportsVPlayer**
 - **ndr**: NDR.de - Mediathek
 - **NDTV**
 - **NerdCubedFeed**
 - **Nerdist**
 - **Netzkino**
 - **Newgrounds**
 - **Newstube**
 - **NextMedia**
 - **NextMediaActionNews**
 - **nfb**: National Film Board of Canada
 - **nfl.com**
 - **nhl.com**
 - **nhl.com:news**: NHL news
 - **nhl.com:videocenter**: NHL videocenter category
 - **niconico**: ニコニコ動画
 - **NiconicoPlaylist**
 - **Noco**
 - **Normalboots**
 - **NosVideo**
 - **novamov**: NovaMov
 - **Nowness**
 - **nowvideo**: NowVideo
 - **npo.nl**
 - **npo.nl:live**
 - **npo.nl:radio**
 - **npo.nl:radio:fragment**
 - **NRK**
 - **NRKPlaylist**
 - **NRKTV**
 - **ntv.ru**
 - **Nuvid**
 - **NYTimes**
 - **ocw.mit.edu**
 - **Odnoklassniki**
 - **OktoberfestTV**
 - **on.aol.com**
 - **Ooyala**
 - **OpenFilm**
 - **orf:fm4**: radio FM4
 - **orf:iptv**: iptv.ORF.at
 - **orf:oe1**: Radio Österreich 1
 - **orf:tvthek**: ORF TVthek
 - **parliamentlive.tv**: UK parliament videos
 - **Patreon**
 - **PBS**
 - **Phoenix**
 - **Photobucket**
 - **Pladform**
 - **PlanetaPlay**
 - **play.fm**
 - **played.to**
 - **Playvid**
 - **Playwire**
 - **plus.google**: Google Plus
 - **pluzz.francetv.fr**
 - **podomatic**
 - **PornHd**
 - **PornHub**
 - **PornHubPlaylist**
 - **Pornotube**
 - **PornoXO**
 - **PrimeShareTV**
 - **PromptFile**
 - **prosiebensat1**: ProSiebenSat.1 Digital
 - **Puls4**
 - **Pyvideo**
 - **QuickVid**
 - **R7**
 - **radio.de**
 - **radiobremen**
 - **radiofrance**
 - **Rai**
 - **RBMARadio**
 - **RedTube**
 - **Restudy**
 - **ReverbNation**
 - **RingTV**
 - **RottenTomatoes**
 - **Roxwel**
 - **RTBF**
 - **Rte**
 - **rtl.nl**: rtl.nl and rtlxl.nl
 - **RTL2**
 - **RTLnow**
 - **RTP**
 - **RTS**: RTS.ch
 - **rtve.es:alacarta**: RTVE a la carta
 - **rtve.es:infantil**: RTVE infantil
 - **rtve.es:live**: RTVE.es live streams
 - **RUHD**
 - **rutube**: Rutube videos
 - **rutube:channel**: Rutube channels
 - **rutube:embed**: Rutube embedded videos
 - **rutube:movie**: Rutube movies
 - **rutube:person**: Rutube person videos
 - **RUTV**: RUTV.RU
 - **safari**: safaribooksonline.com online video
 - **safari:course**: safaribooksonline.com online courses
 - **Sandia**: Sandia National Laboratories
 - **Sapo**: SAPO Vídeos
 - **savefrom.net**
 - **SBS**: sbs.com.au
 - **SciVee**
 - **screen.yahoo:search**: Yahoo screen search
 - **Screencast**
 - **ScreencastOMatic**
 - **ScreenwaveMedia**
 - **ServingSys**
 - **Sexu**
 - **SexyKarma**: Sexy Karma and Watch Indian Porn
 - **Shared**
 - **ShareSix**
 - **Sina**
 - **Slideshare**
 - **Slutload**
 - **smotri**: Smotri.com
 - **smotri:broadcast**: Smotri.com broadcasts
 - **smotri:community**: Smotri.com community videos
 - **smotri:user**: Smotri.com user videos
 - **Snotr**
 - **Sockshare**
 - **Sohu**
 - **soundcloud**
 - **soundcloud:playlist**
 - **soundcloud:set**
 - **soundcloud:user**
 - **soundgasm**
 - **soundgasm:profile**
 - **southpark.cc.com**
 - **southpark.de**
 - **Space**
 - **Spankwire**
 - **Spiegel**
 - **Spiegel:Article**: Articles on spiegel.de
 - **Spiegeltv**
 - **Spike**
 - **Sport5**
 - **SportBox**
 - **SportDeutschland**
 - **SRMediathek**: Saarländischer Rundfunk
 - **SSA**
 - **stanfordoc**: Stanford Open ClassRoom
 - **Steam**
 - **streamcloud.eu**
 - **StreamCZ**
 - **StreetVoice**
 - **SunPorno**
 - **SVTPlay**: SVT Play and Öppet arkiv
 - **SWRMediathek**
 - **Syfy**
 - **SztvHu**
 - **Tagesschau**
 - **Tapely**
 - **Tass**
 - **teachertube**: teachertube.com videos
 - **teachertube:user:collection**: teachertube.com user and collection videos
 - **TeachingChannel**
 - **Teamcoco**
 - **TeamFour**
 - **TechTalks**
 - **techtv.mit.edu**
 - **TED**
 - **tegenlicht.vpro.nl**
 - **TeleBruxelles**
 - **telecinco.es**
 - **TeleMB**
 - **TeleTask**
 - **TenPlay**
 - **TestTube**
 - **TF1**
 - **TheOnion**
 - **ThePlatform**
 - **TheSixtyOne**
 - **ThisAV**
 - **THVideo**
 - **THVideoPlaylist**
 - **tinypic**: tinypic.com videos
 - **tlc.com**
 - **tlc.de**
 - **TMZ**
 - **TNAFlix**
 - **tou.tv**
 - **Toypics**: Toypics user profile
 - **ToypicsUser**: Toypics user profile
 - **TrailerAddict** (Currently broken)
 - **Trilulilu**
 - **TruTube**
 - **Tube8**
 - **Tudou**
 - **Tumblr**
 - **TuneIn**
 - **Turbo**
 - **Tutv**
 - **tv.dfb.de**
 - **TV4**: tv4.se and tv4play.se
 - **tvigle**: Интернет-телевидение Tvigle.ru
 - **tvp.pl**
 - **tvp.pl:Series**
 - **TVPlay**: TV3Play and related services
 - **Tweakers**
 - **twitch:bookmarks**
 - **twitch:chapter**
 - **twitch:past_broadcasts**
 - **twitch:profile**
 - **twitch:stream**
 - **twitch:video**
 - **twitch:vod**
 - **Ubu**
 - **udemy**
 - **udemy:course**
 - **Ultimedia**
 - **Unistra**
 - **Urort**: NRK P3 Urørt
 - **ustream**
 - **ustream:channel**
 - **Varzesh3**
 - **Vbox7**
 - **VeeHD**
 - **Veoh**
 - **Vessel**
 - **Vesti**: Вести.Ru
 - **Vevo**
 - **VGTV**
 - **vh1.com**
 - **Vice**
 - **Viddler**
 - **video.google:search**: Google Video search
 - **video.mit.edu**
 - **VideoBam**
 - **VideoDetective**
 - **videofy.me**
 - **videolectures.net**
 - **VideoMega**
 - **VideoPremium**
 - **VideoTt**: video.tt - Your True Tube
 - **videoweed**: VideoWeed
 - **Vidme**
 - **Vidzi**
 - **vier**
 - **vier:videos**
 - **Viewster**
 - **viki**
 - **vimeo**
 - **vimeo:album**
 - **vimeo:channel**
 - **vimeo:group**
 - **vimeo:likes**: Vimeo user likes
 - **vimeo:review**: Review pages on vimeo
 - **vimeo:user**
 - **vimeo:watchlater**: Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)
 - **Vimple**: Vimple.ru
 - **Vine**
 - **vine:user**
 - **vk.com**
 - **vk.com:user-videos**: vk.com:All of a user's videos
 - **Vodlocker**
 - **Vporn**
 - **VRT**
 - **vube**: Vube.com
 - **VuClip**
 - **vulture.com**
 - **Walla**
 - **WashingtonPost**
 - **wat.tv**
 - **WayOfTheMaster**
 - **WDR**
 - **wdr:mobile**
 - **WDRMaus**: Sendung mit der Maus
 - **WebOfStories**
 - **Weibo**
 - **Wimp**
 - **Wistia**
 - **WorldStarHipHop**
 - **wrzuta.pl**
 - **WSJ**: Wall Street Journal
 - **XBef**
 - **XboxClips**
 - **XHamster**
 - **XMinus**
 - **XNXX**
 - **XTube**
 - **XTubeUser**: XTube user profile
 - **Xuite**
 - **XVideos**
 - **XXXYMovies**
 - **Yahoo**: Yahoo screen and movies
 - **Yam**
 - **yandexmusic:album**: Яндекс.Музыка - Альбом
 - **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
 - **yandexmusic:track**: Яндекс.Музыка - Трек
 - **YesJapan**
 - **Ynet**
 - **YouJizz**
 - **Youku**
 - **YouPorn**
 - **YourUpload**
 - **youtube**: YouTube.com
 - **youtube:channel**: YouTube.com channels
 - **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
 - **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
 - **youtube:playlist**: YouTube.com playlists
 - **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
 - **youtube:search**: YouTube.com searches
 - **youtube:search:date**: YouTube.com searches, newest videos first
 - **youtube:search_url**: YouTube.com search URLs
 - **youtube:show**: YouTube.com (multi-season) shows
 - **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
 - **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
 - **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
 - **Zapiks**
 - **ZDF**
 - **ZDFChannel**
 - **zingmp3:album**: mp3.zing.vn albums
 - **zingmp3:song**: mp3.zing.vn songs
@@ -1,2 +1,6 @@
 [wheel]
 universal = True
+
+[flake8]
+exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,setup.py,build,.git
+ignore = E402,E501,E731
6  setup.py
@@ -4,7 +4,6 @@
 from __future__ import print_function

 import os.path
-import pkg_resources
 import warnings
 import sys

@@ -48,6 +47,7 @@ if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
 else:
     files_spec = [
         ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
+        ('etc/fish/completions', ['youtube-dl.fish']),
         ('share/doc/youtube_dl', ['README.txt']),
         ('share/man/man1', ['youtube-dl.1'])
     ]
@@ -102,7 +102,9 @@ setup(
         "Programming Language :: Python :: 2.6",
         "Programming Language :: Python :: 2.7",
         "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.3"
+        "Programming Language :: Python :: 3.2",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
     ],

     **params
126  test/helper.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import errno
 import io
 import hashlib
@@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
 from youtube_dl.utils import (
     compat_str,
     preferredencoding,
+    write_string,
 )


@@ -40,10 +43,10 @@ def report_warning(message):
     If stderr is a tty file the 'WARNING:' will be colored
     '''
     if sys.stderr.isatty() and os.name != 'nt':
-        _msg_header = u'\033[0;33mWARNING:\033[0m'
+        _msg_header = '\033[0;33mWARNING:\033[0m'
     else:
-        _msg_header = u'WARNING:'
-    output = u'%s %s\n' % (_msg_header, message)
+        _msg_header = 'WARNING:'
+    output = '%s %s\n' % (_msg_header, message)
     if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
         output = output.encode(preferredencoding())
     sys.stderr.write(output)
@@ -54,7 +57,7 @@ class FakeYDL(YoutubeDL):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
-        super(FakeYDL, self).__init__(params)
+        super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []

     def to_screen(self, s, skip_eol=None):
@@ -69,32 +72,24 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning

         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)


 def gettestcases(include_onlymatching=False):
     for ie in youtube_dl.extractor.gen_extractors():
-        t = getattr(ie, '_TEST', None)
-        if t:
-            assert not hasattr(ie, '_TESTS'), \
-                '%s has _TEST and _TESTS' % type(ie).__name__
-            tests = [t]
-        else:
-            tests = getattr(ie, '_TESTS', [])
-        for t in tests:
-            if not include_onlymatching and t.get('only_matching', False):
-                continue
-            t['name'] = type(ie).__name__[:-len('IE')]
-            yield t
+        for tc in ie.get_testcases(include_onlymatching):
+            yield tc


 md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()


-def expect_info_dict(self, expected_dict, got_dict):
+def expect_info_dict(self, got_dict, expected_dict):
     for info_field, expected in expected_dict.items():
         if isinstance(expected, compat_str) and expected.startswith('re:'):
             got = got_dict.get(info_field)
@@ -102,34 +97,90 @@ def expect_info_dict(self, expected_dict, got_dict):
             match_rex = re.compile(match_str)

             self.assertTrue(
-                isinstance(got, compat_str) and match_rex.match(got),
-                u'field %s (value: %r) should match %r' % (info_field, got, match_str))
+                isinstance(got, compat_str),
+                'Expected a %s object, but got %s for field %s' % (
+                    compat_str.__name__, type(got).__name__, info_field))
+            self.assertTrue(
+                match_rex.match(got),
+                'field %s (value: %r) should match %r' % (info_field, got, match_str))
+        elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
+            got = got_dict.get(info_field)
+            start_str = expected[len('startswith:'):]
+            self.assertTrue(
+                isinstance(got, compat_str),
+                'Expected a %s object, but got %s for field %s' % (
+                    compat_str.__name__, type(got).__name__, info_field))
+            self.assertTrue(
+                got.startswith(start_str),
+                'field %s (value: %r) should start with %r' % (info_field, got, start_str))
+        elif isinstance(expected, compat_str) and expected.startswith('contains:'):
+            got = got_dict.get(info_field)
+            contains_str = expected[len('contains:'):]
+            self.assertTrue(
+                isinstance(got, compat_str),
+                'Expected a %s object, but got %s for field %s' % (
+                    compat_str.__name__, type(got).__name__, info_field))
+            self.assertTrue(
+                contains_str in got,
+                'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+                'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
+            elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
+                got = got_dict.get(info_field)
+                self.assertTrue(
+                    isinstance(got, list),
+                    'Expected field %s to be a list, but it is of type %s' % (
+                        info_field, type(got).__name__))
+                expected_num = int(expected.partition(':')[2])
+                assertGreaterEqual(
+                    self, len(got), expected_num,
+                    'Expected %d items in field %s, but only got %d' % (
+                        expected_num, info_field, len(got)
+                    )
+                )
+                continue
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
-                u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))

     # Check for the presence of mandatory fields
+    if got_dict.get('_type') != 'playlist':
         for key in ('id', 'url', 'title', 'ext'):
             self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
     # Check for mandatory fields that are automatically set by YoutubeDL
     for key in ['webpage_url', 'extractor', 'extractor_key']:
-        self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
+        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)

     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
         for key, value in got_dict.items()
-        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+        if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
-        sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
+        def _repr(v):
+            if isinstance(v, compat_str):
+                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
+            else:
+                return repr(v)
+        info_dict_str = ''
+        if len(missing_keys) != len(expected_dict):
+            info_dict_str += ''.join(
+                '    %s: %s,\n' % (_repr(k), _repr(v))
+                for k, v in test_info_dict.items() if k not in missing_keys)
+
+            if info_dict_str:
+                info_dict_str += '\n'
+        info_dict_str += ''.join(
+            '    %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
+            for k in missing_keys)
+        write_string(
+            '\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
         self.assertFalse(
             missing_keys,
             'Missing keys in test definition: %s' % (
@@ -137,14 +188,33 @@ def expect_info_dict(self, expected_dict, got_dict):


 def assertRegexpMatches(self, text, regexp, msg=None):
-    if hasattr(self, 'assertRegexpMatches'):
-        return self.assertRegexpMatches(text, regexp, msg)
+    if hasattr(self, 'assertRegexp'):
+        return self.assertRegexp(text, regexp, msg)
     else:
         m = re.match(regexp, text)
         if not m:
-            note = 'Regexp didn\'t match: %r not found in %r' % (regexp, text)
+            note = 'Regexp didn\'t match: %r not found' % (regexp)
+            if len(text) < 1000:
+                note += ' in %r' % text
             if msg is None:
                 msg = note
             else:
                 msg = note + ', ' + msg
         self.assertTrue(m, msg)


+def assertGreaterEqual(self, got, expected, msg=None):
+    if not (got >= expected):
+        if msg is None:
+            msg = '%r not greater than or equal to %r' % (got, expected)
+        self.assertTrue(got >= expected, msg)
+
+
+def expect_warnings(ydl, warnings_re):
+    real_warning = ydl.report_warning
+
+    def _report_warning(w):
+        if not any(re.search(w_re, w) for w_re in warnings_re):
+            real_warning(w)
+
+    ydl.report_warning = _report_warning
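For extractor test authors, the practical upshot of the expect_info_dict() changes above is that an expected value in a test's info_dict can carry a comparison prefix instead of a literal: 're:' (regexp match) and 'md5:' (digest of a long value) existed before, while 'startswith:', 'contains:' and 'mincount:' are newly added here. An illustrative test case using them (the URL, id and values below are invented):

_TEST = {
    'url': 'http://example.com/watch/abc123',
    'info_dict': {
        'id': 'abc123',
        'ext': 'mp4',
        'title': 're:(?i)^my .* video$',           # must match the regexp
        'uploader': 'startswith:Some',              # must start with the string
        'webpage_url': 'contains:/watch/',          # must contain the substring
        'description': 'md5:0123456789abcdef0123456789abcdef',  # fake digest
        'formats': 'mincount:3',                    # list with at least 3 entries
    },
}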
@@ -27,9 +27,8 @@
     "rejecttitle": null,
     "retries": 10,
     "simulate": false,
-    "skip_download": false,
     "subtitleslang": null,
-    "subtitlesformat": "srt",
+    "subtitlesformat": "best",
     "test": true,
    "updatetime": true,
     "usenetrc": false,
@@ -40,5 +39,6 @@
     "writesubtitles": false,
     "allsubtitles": false,
     "listssubtitles": false,
-    "socket_timeout": 20
+    "socket_timeout": 20,
+    "fixup": "never"
 }
1  test/swftests/.gitignore  vendored  Normal file
@@ -0,0 +1 @@
*.swf

19  test/swftests/ArrayAccess.as  Normal file
@@ -0,0 +1,19 @@
// input: [["a", "b", "c", "d"]]
// output: ["c", "b", "a", "d"]

package {
public class ArrayAccess {
    public static function main(ar:Array):Array {
        var aa:ArrayAccess = new ArrayAccess();
        return aa.f(ar, 2);
    }

    private function f(ar:Array, num:Number):Array{
        var x:String = ar[0];
        var y:String = ar[num % ar.length];
        ar[0] = y;
        ar[num] = x;
        return ar;
    }
}
}

17  test/swftests/ClassCall.as  Normal file
@@ -0,0 +1,17 @@
// input: []
// output: 121

package {
public class ClassCall {
    public static function main():int{
        var f:OtherClass = new OtherClass();
        return f.func(100,20);
    }
}
}

class OtherClass {
    public function func(x: int, y: int):int {
        return x+y+1;
    }
}

15  test/swftests/ClassConstruction.as  Normal file
@@ -0,0 +1,15 @@
// input: []
// output: 0

package {
public class ClassConstruction {
    public static function main():int{
        var f:Foo = new Foo();
        return 0;
    }
}
}

class Foo {

}

18  test/swftests/ConstArrayAccess.as  Normal file
@@ -0,0 +1,18 @@
// input: []
// output: 4

package {
public class ConstArrayAccess {
    private static const x:int = 2;
    private static const ar:Array = ["42", "3411"];

    public static function main():int{
        var c:ConstArrayAccess = new ConstArrayAccess();
        return c.f();
    }

    public function f(): int {
        return ar[1].length;
    }
}
}

12  test/swftests/ConstantInt.as  Normal file
@@ -0,0 +1,12 @@
// input: []
// output: 2

package {
public class ConstantInt {
    private static const x:int = 2;

    public static function main():int{
        return x;
    }
}
}

10  test/swftests/DictCall.as  Normal file
@@ -0,0 +1,10 @@
// input: [{"x": 1, "y": 2}]
// output: 3

package {
public class DictCall {
    public static function main(d:Object):int{
        return d.x + d.y;
    }
}
}

10  test/swftests/EqualsOperator.as  Normal file
@@ -0,0 +1,10 @@
// input: []
// output: false

package {
public class EqualsOperator {
    public static function main():Boolean{
        return 1 == 2;
    }
}
}

13  test/swftests/LocalVars.as  Normal file
@@ -0,0 +1,13 @@
// input: [1, 2]
// output: 3

package {
public class LocalVars {
    public static function main(a:int, b:int):int{
        var c:int = a + b + b;
        var d:int = c - b;
        var e:int = d;
        return e;
    }
}
}

22  test/swftests/MemberAssignment.as  Normal file
@@ -0,0 +1,22 @@
// input: [1]
// output: 2

package {
public class MemberAssignment {
    public var v:int;

    public function g():int {
        return this.v;
    }

    public function f(a:int):int{
        this.v = a;
        return this.v + this.g();
    }

    public static function main(a:int): int {
        var v:MemberAssignment = new MemberAssignment();
        return v.f(a);
    }
}
}

24  test/swftests/NeOperator.as  Normal file
@@ -0,0 +1,24 @@
// input: []
// output: 123

package {
public class NeOperator {
    public static function main(): int {
        var res:int = 0;
        if (1 != 2) {
            res += 3;
        } else {
            res += 4;
        }
        if (2 != 2) {
            res += 10;
        } else {
            res += 20;
        }
        if (9 == 9) {
            res += 100;
        }
        return res;
    }
}
}

21  test/swftests/PrivateCall.as  Normal file
@@ -0,0 +1,21 @@
// input: []
// output: 9

package {
public class PrivateCall {
    public static function main():int{
        var f:OtherClass = new OtherClass();
        return f.func();
    }
}
}

class OtherClass {
    private function pf():int {
        return 9;
    }

    public function func():int {
        return this.pf();
    }
}

22  test/swftests/PrivateVoidCall.as  Normal file
@@ -0,0 +1,22 @@
// input: []
// output: 9

package {
public class PrivateVoidCall {
    public static function main():int{
        var f:OtherClass = new OtherClass();
        f.func();
        return 9;
    }
}
}

class OtherClass {
    private function pf():void {
        ;
    }

    public function func():void {
        this.pf();
    }
}

13  test/swftests/StaticAssignment.as  Normal file
@@ -0,0 +1,13 @@
// input: [1]
// output: 1

package {
public class StaticAssignment {
    public static var v:int;

    public static function main(a:int):int{
        v = a;
        return v;
    }
}
}

16  test/swftests/StaticRetrieval.as  Normal file
@@ -0,0 +1,16 @@
// input: []
// output: 1

package {
public class StaticRetrieval {
    public static var v:int;

    public static function main():int{
        if (v) {
            return 0;
        } else {
            return 1;
        }
    }
}
}

11  test/swftests/StringBasics.as  Normal file
@@ -0,0 +1,11 @@
// input: []
// output: 3

package {
public class StringBasics {
    public static function main():int{
        var s:String = "abc";
        return s.length;
    }
}
}

11  test/swftests/StringCharCodeAt.as  Normal file
@@ -0,0 +1,11 @@
// input: []
// output: 9897

package {
public class StringCharCodeAt {
    public static function main():int{
        var s:String = "abc";
        return s.charCodeAt(1) * 100 + s.charCodeAt();
    }
}
}

11  test/swftests/StringConversion.as  Normal file
@@ -0,0 +1,11 @@
// input: []
// output: 2

package {
public class StringConversion {
    public static function main():int{
        var s:String = String(99);
        return s.length;
    }
}
}
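Each of the new test/swftests fixtures follows one convention: a '// input:' comment giving the JSON-encoded arguments for the class's static main(), and a '// output:' comment giving the expected return value. A sketch of how a harness can consume a fixture, assuming the .as file has been compiled to a .swf with a Flex SDK compiler such as mxmlc; the SWFInterpreter calls below mirror youtube-dl's own test harness as far as I can tell and should be treated as an assumption:

import json
import re

from youtube_dl.swfinterp import SWFInterpreter

# Parse the declared input arguments and expected result out of the source.
with open('test/swftests/StringBasics.as') as f:
    source = f.read()
input_args = json.loads(re.search(r'// input: (.*)', source).group(1))
expected = json.loads(re.search(r'// output: (.*)', source).group(1))

# Run the compiled fixture through the interpreter and compare.
with open('test/swftests/StringBasics.swf', 'rb') as f:  # built by mxmlc
    swf = f.read()
interp = SWFInterpreter(swf)
cls = interp.extract_class('StringBasics')
main = interp.extract_function(cls, 'main')
assert main(input_args) == expected  # [] -> 3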
@@ -40,5 +40,23 @@ class TestInfoExtractor(unittest.TestCase):
         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
         self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')

+    def test_html_search_meta(self):
+        ie = self.ie
+        html = '''
+            <meta name="a" content="1" />
+            <meta name='b' content='2'>
+            <meta name="c" content='3'>
+            <meta name=d content='4'>
+            <meta property="e" content='5' >
+            <meta content="6" name="f">
+        '''
+
+        self.assertEqual(ie._html_search_meta('a', html), '1')
+        self.assertEqual(ie._html_search_meta('b', html), '2')
+        self.assertEqual(ie._html_search_meta('c', html), '3')
+        self.assertEqual(ie._html_search_meta('d', html), '4')
+        self.assertEqual(ie._html_search_meta('e', html), '5')
+        self.assertEqual(ie._html_search_meta('f', html), '6')
+
 if __name__ == '__main__':
     unittest.main()
@@ -8,9 +8,15 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

+import copy
+
 from test.helper import FakeYDL, assertRegexpMatches
 from youtube_dl import YoutubeDL
 from youtube_dl.extractor import YoutubeIE
+from youtube_dl.postprocessor.common import PostProcessor
+from youtube_dl.utils import match_filter_func
+
+TEST_URL = 'http://localhost/sample.mp4'


 class YDL(FakeYDL):
@@ -43,8 +49,8 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
-            {'ext': 'webm', 'height': 460, 'url': 'x'},
-            {'ext': 'mp4', 'height': 460, 'url': 'y'},
+            {'ext': 'webm', 'height': 460, 'url': TEST_URL},
+            {'ext': 'mp4', 'height': 460, 'url': TEST_URL},
         ]
         info_dict = _make_result(formats)
         yie = YoutubeIE(ydl)
@@ -57,8 +63,8 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
-            {'ext': 'webm', 'height': 720, 'url': 'a'},
-            {'ext': 'mp4', 'height': 1080, 'url': 'b'},
+            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
+            {'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
         ]
         info_dict['formats'] = formats
         yie = YoutubeIE(ydl)
@@ -71,9 +77,9 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
-            {'ext': 'webm', 'height': 720, 'url': '_'},
-            {'ext': 'mp4', 'height': 720, 'url': '_'},
-            {'ext': 'flv', 'height': 720, 'url': '_'},
+            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
+            {'ext': 'mp4', 'height': 720, 'url': TEST_URL},
+            {'ext': 'flv', 'height': 720, 'url': TEST_URL},
         ]
         info_dict['formats'] = formats
         yie = YoutubeIE(ydl)
@@ -85,8 +91,8 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
-            {'ext': 'flv', 'height': 720, 'url': '_'},
-            {'ext': 'webm', 'height': 720, 'url': '_'},
+            {'ext': 'flv', 'height': 720, 'url': TEST_URL},
+            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
         ]
         info_dict['formats'] = formats
         yie = YoutubeIE(ydl)
@@ -130,10 +136,10 @@ class TestFormatSelection(unittest.TestCase):

     def test_format_selection(self):
         formats = [
-            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': '_'},
-            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': '_'},
-            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': '_'},
-            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': '_'},
+            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
+            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
+            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
         ]
         info_dict = _make_result(formats)

@@ -164,10 +170,10 @@ class TestFormatSelection(unittest.TestCase):

     def test_format_selection_audio(self):
         formats = [
-            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': '_'},
-            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': '_'},
-            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': '_'},
-            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': '_'},
+            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
+            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
+            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL},
+            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL},
         ]
         info_dict = _make_result(formats)

@@ -182,8 +188,8 @@ class TestFormatSelection(unittest.TestCase):
         self.assertEqual(downloaded['format_id'], 'audio-low')

         formats = [
-            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': '_'},
-            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': '_'},
+            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
         ]
         info_dict = _make_result(formats)

@@ -192,11 +198,42 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'vid-high')

+    def test_format_selection_audio_exts(self):
+        formats = [
+            {'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
+            {'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
+            {'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
+            {'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
+            {'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
+        ]
+
+        info_dict = _make_result(formats)
+        ydl = YDL({'format': 'best'})
+        ie = YoutubeIE(ydl)
+        ie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(copy.deepcopy(info_dict))
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'aac-64')
+
+        ydl = YDL({'format': 'mp3'})
+        ie = YoutubeIE(ydl)
+        ie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(copy.deepcopy(info_dict))
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'mp3-64')
+
+        ydl = YDL({'prefer_free_formats': True})
+        ie = YoutubeIE(ydl)
+        ie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(copy.deepcopy(info_dict))
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'ogg-64')
+
     def test_format_selection_video(self):
         formats = [
-            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'},
-            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': '_'},
-            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': '_'},
+            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL},
+            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL},
+            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL},
         ]
         info_dict = _make_result(formats)

@@ -218,10 +255,10 @@ class TestFormatSelection(unittest.TestCase):
             # 3D
             '85', '84', '102', '83', '101', '82', '100',
             # Dash video
-            '138', '137', '248', '136', '247', '135', '246',
+            '137', '248', '136', '247', '135', '246',
             '245', '244', '134', '243', '133', '242', '160',
             # Dash audio
-            '141', '172', '140', '139', '171',
+            '141', '172', '140', '171', '139',
         ]

         for f1id, f2id in zip(order, order[1:]):
@@ -248,6 +285,122 @@ class TestFormatSelection(unittest.TestCase):
             downloaded = ydl.downloaded_info_dicts[0]
             self.assertEqual(downloaded['format_id'], f1id)

+    def test_format_filtering(self):
+        formats = [
+            {'format_id': 'A', 'filesize': 500, 'width': 1000},
+            {'format_id': 'B', 'filesize': 1000, 'width': 500},
+            {'format_id': 'C', 'filesize': 1000, 'width': 400},
+            {'format_id': 'D', 'filesize': 2000, 'width': 600},
+            {'format_id': 'E', 'filesize': 3000},
+            {'format_id': 'F'},
+            {'format_id': 'G', 'filesize': 1000000},
+        ]
+        for f in formats:
+            f['url'] = 'http://_/'
+            f['ext'] = 'unknown'
+        info_dict = _make_result(formats)
+
+        ydl = YDL({'format': 'best[filesize<3000]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'D')
+
+        ydl = YDL({'format': 'best[filesize<=3000]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'E')
+
+        ydl = YDL({'format': 'best[filesize <= ? 3000]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'F')
+
+        ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'B')
+
+        ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'C')
+
+        ydl = YDL({'format': '[filesize>?1]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'G')
+
+        ydl = YDL({'format': '[filesize<1M]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'E')
+
+        ydl = YDL({'format': '[filesize<1MiB]'})
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'G')
class TestYoutubeDL(unittest.TestCase):
|
||||||
|
def test_subtitles(self):
|
||||||
|
def s_formats(lang, autocaption=False):
|
||||||
|
return [{
|
||||||
|
'ext': ext,
|
||||||
|
'url': 'http://localhost/video.%s.%s' % (lang, ext),
|
||||||
|
'_auto': autocaption,
|
||||||
|
} for ext in ['vtt', 'srt', 'ass']]
|
||||||
|
subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es'])
|
||||||
|
auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es'])
|
||||||
|
info_dict = {
|
||||||
|
'id': 'test',
|
||||||
|
'title': 'Test',
|
||||||
|
'url': 'http://localhost/video.mp4',
|
||||||
|
'subtitles': subtitles,
|
||||||
|
'automatic_captions': auto_captions,
|
||||||
|
'extractor': 'TEST',
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_info(params={}):
|
||||||
|
params.setdefault('simulate', True)
|
||||||
|
ydl = YDL(params)
|
||||||
|
ydl.report_warning = lambda *args, **kargs: None
|
||||||
|
return ydl.process_video_result(info_dict, download=False)
|
||||||
|
|
||||||
|
result = get_info()
|
||||||
|
self.assertFalse(result.get('requested_subtitles'))
|
||||||
|
self.assertEqual(result['subtitles'], subtitles)
|
||||||
|
self.assertEqual(result['automatic_captions'], auto_captions)
|
||||||
|
|
||||||
|
result = get_info({'writesubtitles': True})
|
||||||
|
subs = result['requested_subtitles']
|
||||||
|
self.assertTrue(subs)
|
||||||
|
self.assertEqual(set(subs.keys()), set(['en']))
|
||||||
|
self.assertTrue(subs['en'].get('data') is None)
|
||||||
|
self.assertEqual(subs['en']['ext'], 'ass')
|
||||||
|
|
||||||
|
result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'})
|
||||||
|
subs = result['requested_subtitles']
|
||||||
|
self.assertEqual(subs['en']['ext'], 'srt')
|
||||||
|
|
||||||
|
result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
|
||||||
|
subs = result['requested_subtitles']
|
||||||
|
self.assertTrue(subs)
|
||||||
|
self.assertEqual(set(subs.keys()), set(['es', 'fr']))
|
||||||
|
|
||||||
|
result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
|
||||||
|
subs = result['requested_subtitles']
|
||||||
|
self.assertTrue(subs)
|
||||||
|
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
|
||||||
|
self.assertFalse(subs['es']['_auto'])
|
||||||
|
self.assertTrue(subs['pt']['_auto'])
|
||||||
|
|
||||||
|
result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
|
||||||
|
subs = result['requested_subtitles']
|
||||||
|
self.assertTrue(subs)
|
||||||
|
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
|
||||||
|
self.assertTrue(subs['es']['_auto'])
|
||||||
|
self.assertTrue(subs['pt']['_auto'])
|
||||||
|
|
||||||
def test_add_extra_info(self):
|
def test_add_extra_info(self):
|
||||||
test_dict = {
|
test_dict = {
|
||||||
'extractor': 'Foo',
|
'extractor': 'Foo',
|
||||||
@@ -266,6 +419,7 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'width': None,
|
'width': None,
|
||||||
}
|
}
|
||||||
|
|
||||||
def fname(templ):
|
def fname(templ):
|
||||||
ydl = YoutubeDL({'outtmpl': templ})
|
ydl = YoutubeDL({'outtmpl': templ})
|
||||||
return ydl.prepare_filename(info)
|
return ydl.prepare_filename(info)
|
||||||
@@ -281,5 +435,102 @@ class TestFormatSelection(unittest.TestCase):
|
|||||||
'vbr': 10,
|
'vbr': 10,
|
||||||
}), '^\s*10k$')
|
}), '^\s*10k$')
|
||||||
|
|
||||||
|
def test_postprocessors(self):
|
||||||
|
filename = 'post-processor-testfile.mp4'
|
||||||
|
audiofile = filename + '.mp3'
|
||||||
|
|
||||||
|
class SimplePP(PostProcessor):
|
||||||
|
def run(self, info):
|
||||||
|
with open(audiofile, 'wt') as f:
|
||||||
|
f.write('EXAMPLE')
|
||||||
|
info['filepath']
|
||||||
|
return False, info
|
||||||
|
|
||||||
|
def run_pp(params):
|
||||||
|
with open(filename, 'wt') as f:
|
||||||
|
f.write('EXAMPLE')
|
||||||
|
ydl = YoutubeDL(params)
|
||||||
|
ydl.add_post_processor(SimplePP())
|
||||||
|
ydl.post_process(filename, {'filepath': filename})
|
||||||
|
|
||||||
|
run_pp({'keepvideo': True})
|
||||||
|
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
|
||||||
|
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
|
||||||
|
os.unlink(filename)
|
||||||
|
os.unlink(audiofile)
|
||||||
|
|
||||||
|
run_pp({'keepvideo': False})
|
||||||
|
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
|
||||||
|
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
|
||||||
|
os.unlink(audiofile)
|
||||||
|
|
||||||
|
def test_match_filter(self):
|
||||||
|
class FilterYDL(YDL):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super(FilterYDL, self).__init__(*args, **kwargs)
|
||||||
|
self.params['simulate'] = True
|
||||||
|
|
||||||
|
def process_info(self, info_dict):
|
||||||
|
super(YDL, self).process_info(info_dict)
|
||||||
|
|
||||||
|
def _match_entry(self, info_dict, incomplete):
|
||||||
|
res = super(FilterYDL, self)._match_entry(info_dict, incomplete)
|
||||||
|
if res is None:
|
||||||
|
self.downloaded_info_dicts.append(info_dict)
|
||||||
|
return res
|
||||||
|
|
||||||
|
first = {
|
||||||
|
'id': '1',
|
||||||
|
'url': TEST_URL,
|
||||||
|
'title': 'one',
|
||||||
|
'extractor': 'TEST',
|
||||||
|
'duration': 30,
|
||||||
|
'filesize': 10 * 1024,
|
||||||
|
}
|
||||||
|
second = {
|
||||||
|
'id': '2',
|
||||||
|
'url': TEST_URL,
|
||||||
|
'title': 'two',
|
||||||
|
'extractor': 'TEST',
|
||||||
|
'duration': 10,
|
||||||
|
'description': 'foo',
|
||||||
|
'filesize': 5 * 1024,
|
||||||
|
}
|
||||||
|
videos = [first, second]
|
||||||
|
|
||||||
|
def get_videos(filter_=None):
|
||||||
|
ydl = FilterYDL({'match_filter': filter_})
|
||||||
|
for v in videos:
|
||||||
|
ydl.process_ie_result(v, download=True)
|
||||||
|
return [v['id'] for v in ydl.downloaded_info_dicts]
|
||||||
|
|
||||||
|
res = get_videos()
|
||||||
|
self.assertEqual(res, ['1', '2'])
|
||||||
|
|
||||||
|
def f(v):
|
||||||
|
if v['id'] == '1':
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return 'Video id is not 1'
|
||||||
|
res = get_videos(f)
|
||||||
|
self.assertEqual(res, ['1'])
|
||||||
|
|
||||||
|
f = match_filter_func('duration < 30')
|
||||||
|
res = get_videos(f)
|
||||||
|
self.assertEqual(res, ['2'])
|
||||||
|
|
||||||
|
f = match_filter_func('description = foo')
|
||||||
|
res = get_videos(f)
|
||||||
|
self.assertEqual(res, ['2'])
|
||||||
|
|
||||||
|
f = match_filter_func('description =? foo')
|
||||||
|
res = get_videos(f)
|
||||||
|
self.assertEqual(res, ['1', '2'])
|
||||||
|
|
||||||
|
f = match_filter_func('filesize > 5KiB')
|
||||||
|
res = get_videos(f)
|
||||||
|
self.assertEqual(res, ['1'])
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
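Aside: the test_format_filtering hunk above is the reference for youtube-dl's bracketed --format filter syntax: numeric comparisons (<, <=, =, !=, >), a trailing '?' that also admits formats lacking the attribute entirely, and size suffixes where '1M' is decimal and '1MiB' is binary. A minimal sketch of the same selector driven through the embedding API rather than the test harness (the URL is a placeholder; the option name and context-manager usage follow the project's README embedding example):

    import youtube_dl

    # Keep the best format whose declared filesize is at most 50 MiB.
    # A trailing '?' would be needed to also accept formats with no
    # declared filesize (see the 'filesize <= ? 3000' case above).
    opts = {'format': 'best[filesize<=50MiB]'}
    with youtube_dl.YoutubeDL(opts) as ydl:
        ydl.download(['http://example.com/some-video'])  # placeholder URL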
test/test_aes.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_decrypt_text
+from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes
+import base64
+
+# the encrypted data can be generate with 'devscripts/generate_aes_testdata.py'
+
+
+class TestAES(unittest.TestCase):
+    def setUp(self):
+        self.key = self.iv = [0x20, 0x15] + 14 * [0]
+        self.secret_msg = b'Secret message goes here'
+
+    def test_encrypt(self):
+        msg = b'message'
+        key = list(range(16))
+        encrypted = aes_encrypt(bytes_to_intlist(msg), key)
+        decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
+        self.assertEqual(decrypted, msg)
+
+    def test_cbc_decrypt(self):
+        data = bytes_to_intlist(
+            b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
+        )
+        decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
+        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
+
+    def test_decrypt_text(self):
+        password = intlist_to_bytes(self.key).decode('utf-8')
+        encrypted = base64.b64encode(
+            intlist_to_bytes(self.iv[:8]) +
+            b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
+        )
+        decrypted = (aes_decrypt_text(encrypted, password, 16))
+        self.assertEqual(decrypted, self.secret_msg)
+
+        password = intlist_to_bytes(self.key).decode('utf-8')
+        encrypted = base64.b64encode(
+            intlist_to_bytes(self.iv[:8]) +
+            b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
+        )
+        decrypted = (aes_decrypt_text(encrypted, password, 32))
+        self.assertEqual(decrypted, self.secret_msg)
+
+
+if __name__ == '__main__':
+    unittest.main()
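A note on test_aes.py: the helpers in youtube_dl.aes operate on lists of byte values rather than byte strings, which is why every test round-trips through bytes_to_intlist/intlist_to_bytes. A sketch of the encrypt/decrypt round trip, mirroring test_encrypt above (message and key values are arbitrary choices taken from the test):

    from youtube_dl.aes import aes_encrypt, aes_decrypt
    from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

    key = list(range(16))  # a 16-entry int list, i.e. an AES-128 key
    msg = b'message'
    encrypted = aes_encrypt(bytes_to_intlist(msg), key)
    # Decrypting with the same key recovers the original bytes.
    assert intlist_to_bytes(aes_decrypt(encrypted, key)) == msg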
test/test_age_restriction.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals

 # Allow direct execution
 import os
@@ -19,7 +20,7 @@ def _download_restricted(url, filename, age):
         'age_limit': age,
         'skip_download': True,
         'writeinfojson': True,
-        "outtmpl": "%(id)s.%(ext)s",
+        'outtmpl': '%(id)s.%(ext)s',
     }
     ydl = YoutubeDL(params)
     ydl.add_default_info_extractors()
@@ -44,11 +45,6 @@ class TestAgeRestriction(unittest.TestCase):
             'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
             '505835.mp4', 2, old_age=25)

-    def test_pornotube(self):
-        self._assert_restricted(
-            'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
-            '1689755.flv', 13)
-

 if __name__ == '__main__':
     unittest.main()
test/test_all_urls.py
@@ -14,8 +14,6 @@ from test.helper import gettestcases
 from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
-    JustinTVIE,
-    PBSIE,
     YoutubeIE,
 )

@@ -61,7 +59,7 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('www.youtube.com/NASAgovVideo/videos', ['youtube:user'])

     def test_youtube_feeds(self):
-        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watch_later'])
+        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watchlater'])
         self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:subscriptions'])
         self.assertMatch('https://www.youtube.com/feed/recommended', ['youtube:recommended'])
         self.assertMatch('https://www.youtube.com/my_favorites', ['youtube:favorites'])
@@ -73,22 +71,6 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])

-    def test_justin_tv_channelid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))
-
-    def test_justintv_videoid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
-
-    def test_justin_tv_chapterid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
-
     def test_youtube_extract(self):
         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
@@ -100,6 +82,7 @@ class TestAllURLsMatching(unittest.TestCase):

     def test_facebook_matching(self):
         self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
+        self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793'))

     def test_no_duplicates(self):
         ies = gen_extractors()
@@ -109,7 +92,9 @@ class TestAllURLsMatching(unittest.TestCase):
             if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
                 self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
             else:
-                self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))
+                self.assertFalse(
+                    ie.suitable(url),
+                    '%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))

     def test_keywords(self):
         self.assertMatch(':ytsubs', ['youtube:subscriptions'])
@@ -117,15 +102,13 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch(':ythistory', ['youtube:history'])
         self.assertMatch(':thedailyshow', ['ComedyCentralShows'])
         self.assertMatch(':tds', ['ComedyCentralShows'])
-        self.assertMatch(':colbertreport', ['ComedyCentralShows'])
-        self.assertMatch(':cr', ['ComedyCentralShows'])

     def test_vimeo_matching(self):
-        self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
-        self.assertMatch('http://vimeo.com/channels/31259', ['vimeo:channel'])
-        self.assertMatch('http://vimeo.com/channels/31259/53576664', ['vimeo'])
-        self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
-        self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user'])
+        self.assertMatch('https://vimeo.com/channels/tributes', ['vimeo:channel'])
+        self.assertMatch('https://vimeo.com/channels/31259', ['vimeo:channel'])
+        self.assertMatch('https://vimeo.com/channels/31259/53576664', ['vimeo'])
+        self.assertMatch('https://vimeo.com/user7108434', ['vimeo:user'])
+        self.assertMatch('https://vimeo.com/user7108434/videos', ['vimeo:user'])
         self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])

         # https://github.com/rg3/youtube-dl/issues/1930
@@ -141,32 +124,6 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
         self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])

-    def test_ComedyCentralShows(self):
-        self.assertMatch(
-            'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
-            ['ComedyCentralShows'])
-        self.assertMatch(
-            'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
-            ['ComedyCentralShows'])
-
     def test_yahoo_https(self):
         # https://github.com/rg3/youtube-dl/issues/2701
         self.assertMatch(
test/test_cache.py (new file, 59 lines)
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+import shutil
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from test.helper import FakeYDL
+from youtube_dl.cache import Cache
+
+
+def _is_empty(d):
+    return not bool(os.listdir(d))
+
+
+def _mkdir(d):
+    if not os.path.exists(d):
+        os.mkdir(d)
+
+
+class TestCache(unittest.TestCase):
+    def setUp(self):
+        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
+        TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
+        _mkdir(TESTDATA_DIR)
+        self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
+        self.tearDown()
+
+    def tearDown(self):
+        if os.path.exists(self.test_dir):
+            shutil.rmtree(self.test_dir)
+
+    def test_cache(self):
+        ydl = FakeYDL({
+            'cachedir': self.test_dir,
+        })
+        c = Cache(ydl)
+        obj = {'x': 1, 'y': ['ä', '\\a', True]}
+        self.assertEqual(c.load('test_cache', 'k.'), None)
+        c.store('test_cache', 'k.', obj)
+        self.assertEqual(c.load('test_cache', 'k2'), None)
+        self.assertFalse(_is_empty(self.test_dir))
+        self.assertEqual(c.load('test_cache', 'k.'), obj)
+        self.assertEqual(c.load('test_cache', 'y'), None)
+        self.assertEqual(c.load('test_cache2', 'k.'), None)
+        c.remove()
+        self.assertFalse(os.path.exists(self.test_dir))
+        self.assertEqual(c.load('test_cache', 'k.'), None)
+
+
+if __name__ == '__main__':
+    unittest.main()

test/test_compat.py (new file, 46 lines)
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.utils import get_filesystem_encoding
+from youtube_dl.compat import (
+    compat_getenv,
+    compat_expanduser,
+)
+
+
+class TestCompat(unittest.TestCase):
+    def test_compat_getenv(self):
+        test_str = 'тест'
+        os.environ['YOUTUBE-DL-TEST'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
+
+    def test_compat_expanduser(self):
+        old_home = os.environ.get('HOME')
+        test_str = 'C:\Documents and Settings\тест\Application Data'
+        os.environ['HOME'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_expanduser('~'), test_str)
+        os.environ['HOME'] = old_home
+
+    def test_all_present(self):
+        import youtube_dl.compat
+        all_names = youtube_dl.compat.__all__
+        present_names = set(filter(
+            lambda c: '_' in c and not c.startswith('_'),
+            dir(youtube_dl.compat))) - set(['unicode_literals'])
+        self.assertEqual(all_names, sorted(present_names))
+
+
+if __name__ == '__main__':
+    unittest.main()
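The cache test above fixes the Cache contract: entries are addressed by a (section, key) pair under the directory given by the 'cachedir' option, load() returns None on any miss, and remove() deletes the backing directory. A sketch of the same calls outside the test harness (the section/key names and the scratch directory are arbitrary placeholders):

    from youtube_dl import YoutubeDL
    from youtube_dl.cache import Cache

    ydl = YoutubeDL({'cachedir': '/tmp/ydl-cache-demo'})  # placeholder scratch dir
    c = Cache(ydl)
    c.store('demo', 'k', {'x': 1})          # JSON-serializable values only
    assert c.load('demo', 'k') == {'x': 1}
    assert c.load('demo', 'other') is None  # miss -> None
    c.remove()                              # wipes the cache directory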
test/test_download.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python

+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -7,10 +9,11 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from test.helper import (
+    assertGreaterEqual,
+    expect_warnings,
     get_params,
     gettestcases,
     expect_info_dict,
-    md5,
     try_rm,
     report_warning,
 )
@@ -22,31 +25,37 @@ import json
 import socket

 import youtube_dl.YoutubeDL
-from youtube_dl.utils import (
+from youtube_dl.compat import (
     compat_http_client,
-    compat_str,
     compat_urllib_error,
     compat_HTTPError,
+)
+from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
+    format_bytes,
     UnavailableVideoError,
 )
 from youtube_dl.extractor import get_info_extractor

 RETRIES = 3


 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)

     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)

     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)


 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -56,48 +65,65 @@ defs = gettestcases()

 class TestDownload(unittest.TestCase):
     maxDiff = None

     def setUp(self):
         self.defs = defs

-### Dynamically generate tests
+# Dynamically generate tests


 def generator(test_case):

     def test_template(self):
         ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
         other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
+        is_playlist = any(k.startswith('playlist') for k in test_case)
+        test_cases = test_case.get(
+            'playlist', [] if is_playlist else [test_case])

         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
         if not ie.working():
             print_skipping('IE marked as not _WORKING')
             return
-        if 'playlist' not in test_case:
-            info_dict = test_case.get('info_dict', {})
-            if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
+        for tc in test_cases:
+            info_dict = tc.get('info_dict', {})
+            if not (info_dict.get('id') and info_dict.get('ext')):
                 raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
             return
         for other_ie in other_ies:
             if not other_ie.working():
-                print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
+                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                 return

         params = get_params(test_case.get('params', {}))
+        if is_playlist and 'playlist' not in test_case:
+            params.setdefault('extract_flat', True)
+            params.setdefault('skip_download', True)
+
-        ydl = YoutubeDL(params)
+        ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()

         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
         ydl.add_progress_hook(_hook)
+        expect_warnings(ydl, test_case.get('expected_warnings', []))

         def get_tc_filename(tc):
-            return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
+            return ydl.prepare_filename(tc.get('info_dict', {}))

-        test_cases = test_case.get('playlist', [test_case])
-        def try_rm_tcs_files():
-            for tc in test_cases:
+        res_dict = None
+
+        def try_rm_tcs_files(tcs=None):
+            if tcs is None:
+                tcs = test_cases
+            for tc in tcs:
                 tc_filename = get_tc_filename(tc)
                 try_rm(tc_filename)
                 try_rm(tc_filename + '.part')
@@ -107,14 +133,17 @@ def generator(test_case):
         try_num = 1
         while True:
             try:
-                ydl.download([test_case['url']])
+                # We're not using .download here sine that is just a shim
+                # for outside error handling, and returns the exit code
+                # instead of the result dict.
+                res_dict = ydl.extract_info(test_case['url'])
             except (DownloadError, ExtractorError) as err:
                 # Check if the exception is not a network related one
                 if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                     raise

                 if try_num == RETRIES:
-                    report_warning(u'Failed due to network errors, skipping...')
+                    report_warning('Failed due to network errors, skipping...')
                     return

                 print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
@@ -123,34 +152,78 @@ def generator(test_case):
             else:
                 break

+            if is_playlist:
+                self.assertEqual(res_dict['_type'], 'playlist')
+                self.assertTrue('entries' in res_dict)
+                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
+
+            if 'playlist_mincount' in test_case:
+                assertGreaterEqual(
+                    self,
+                    len(res_dict['entries']),
+                    test_case['playlist_mincount'],
+                    'Expected at least %d in playlist %s, but got only %d' % (
+                        test_case['playlist_mincount'], test_case['url'],
+                        len(res_dict['entries'])))
+            if 'playlist_count' in test_case:
+                self.assertEqual(
+                    len(res_dict['entries']),
+                    test_case['playlist_count'],
+                    'Expected %d entries in playlist %s, but got %d.' % (
+                        test_case['playlist_count'],
+                        test_case['url'],
+                        len(res_dict['entries']),
+                    ))
+            if 'playlist_duration_sum' in test_case:
+                got_duration = sum(e['duration'] for e in res_dict['entries'])
+                self.assertEqual(
+                    test_case['playlist_duration_sum'], got_duration)
+
             for tc in test_cases:
                 tc_filename = get_tc_filename(tc)
                 if not test_case.get('params', {}).get('skip_download', False):
                     self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                     self.assertTrue(tc_filename in finished_hook_called)
-                    info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
-                    self.assertTrue(os.path.exists(info_json_fn))
+                    expected_minsize = tc.get('file_minsize', 10000)
+                    if expected_minsize is not None:
+                        if params.get('test'):
+                            expected_minsize = max(expected_minsize, 10000)
+                        got_fsize = os.path.getsize(tc_filename)
+                        assertGreaterEqual(
+                            self, got_fsize, expected_minsize,
+                            'Expected %s to be at least %s, but it\'s only %s ' %
+                            (tc_filename, format_bytes(expected_minsize),
+                             format_bytes(got_fsize)))
                 if 'md5' in tc:
                     md5_for_file = _file_md5(tc_filename)
                     self.assertEqual(md5_for_file, tc['md5'])
+                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
+                self.assertTrue(
+                    os.path.exists(info_json_fn),
+                    'Missing info file %s' % info_json_fn)
                 with io.open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)

-                expect_info_dict(self, tc.get('info_dict', {}), info_dict)
+                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
         finally:
             try_rm_tcs_files()
+            if is_playlist and res_dict is not None and res_dict.get('entries'):
+                # Remove all other files that may have been extracted if the
+                # extractor returns full results even with extract_flat
+                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
+                try_rm_tcs_files(res_tcs)

     return test_template

-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
     i = 1
     while hasattr(TestDownload, tname):
-        tname = 'test_' + str(test_case['name']) + '_' + str(i)
+        tname = 'test_%s_%d' % (test_case['name'], i)
         i += 1
-    test_method.__name__ = tname
+    test_method.__name__ = str(tname)
     setattr(TestDownload, test_method.__name__, test_method)
     del test_method
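With this rework, a playlist test no longer needs a 'playlist' list of per-video cases: any test definition carrying a key that starts with 'playlist' is treated as a playlist test, gets extract_flat and skip_download defaulted on, and has its extracted entries checked via playlist_mincount, playlist_count or playlist_duration_sum. A sketch of what such an extractor test definition looks like (the URL and values are placeholders; the key names are the ones read by the generator above):

    _TEST = {
        'url': 'http://example.com/some-playlist',  # placeholder
        'info_dict': {
            'id': 'some-playlist',
            'title': 'Some Playlist',
        },
        'playlist_mincount': 10,  # at least 10 entries must be extracted
    }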
test/test_execution.py
@@ -1,3 +1,8 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
 import unittest

 import sys
@@ -6,11 +11,13 @@ import subprocess

 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

+
 try:
     _DEV_NULL = subprocess.DEVNULL
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')

+
 class TestExecution(unittest.TestCase):
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
@@ -22,5 +29,12 @@ class TestExecution(unittest.TestCase):
     def test_main_exec(self):
         subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)

+    def test_cmdline_umlauts(self):
+        p = subprocess.Popen(
+            [sys.executable, 'youtube_dl/__main__.py', 'ä', '--version'],
+            cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE)
+        _, stderr = p.communicate()
+        self.assertFalse(stderr)
+
 if __name__ == '__main__':
     unittest.main()
test/test_http.py (new file, 119 lines)
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl import YoutubeDL
+from youtube_dl.compat import compat_http_server, compat_urllib_request
+import ssl
+import threading
+
+TEST_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+    def log_message(self, format, *args):
+        pass
+
+    def do_GET(self):
+        if self.path == '/video.html':
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/html; charset=utf-8')
+            self.end_headers()
+            self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
+        elif self.path == '/vid.mp4':
+            self.send_response(200)
+            self.send_header('Content-Type', 'video/mp4')
+            self.end_headers()
+            self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]')
+        else:
+            assert False
+
+
+class FakeLogger(object):
+    def debug(self, msg):
+        pass
+
+    def warning(self, msg):
+        pass
+
+    def error(self, msg):
+        pass
+
+
+class TestHTTP(unittest.TestCase):
+    def setUp(self):
+        certfn = os.path.join(TEST_DIR, 'testcert.pem')
+        self.httpd = compat_http_server.HTTPServer(
+            ('localhost', 0), HTTPTestRequestHandler)
+        self.httpd.socket = ssl.wrap_socket(
+            self.httpd.socket, certfile=certfn, server_side=True)
+        self.port = self.httpd.socket.getsockname()[1]
+        self.server_thread = threading.Thread(target=self.httpd.serve_forever)
+        self.server_thread.daemon = True
+        self.server_thread.start()
+
+    def test_nocheckcertificate(self):
+        if sys.version_info >= (2, 7, 9):  # No certificate checking anyways
+            ydl = YoutubeDL({'logger': FakeLogger()})
+            self.assertRaises(
+                Exception,
+                ydl.extract_info, 'https://localhost:%d/video.html' % self.port)
+
+        ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
+        r = ydl.extract_info('https://localhost:%d/video.html' % self.port)
+        self.assertEqual(r['url'], 'https://localhost:%d/vid.mp4' % self.port)
+
+
+def _build_proxy_handler(name):
+    class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+        proxy_name = name
+
+        def log_message(self, format, *args):
+            pass
+
+        def do_GET(self):
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain; charset=utf-8')
+            self.end_headers()
+            self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode('utf-8'))
+    return HTTPTestRequestHandler
+
+
+class TestProxy(unittest.TestCase):
+    def setUp(self):
+        self.proxy = compat_http_server.HTTPServer(
+            ('localhost', 0), _build_proxy_handler('normal'))
+        self.port = self.proxy.socket.getsockname()[1]
+        self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
+        self.proxy_thread.daemon = True
+        self.proxy_thread.start()
+
+        self.cn_proxy = compat_http_server.HTTPServer(
+            ('localhost', 0), _build_proxy_handler('cn'))
+        self.cn_port = self.cn_proxy.socket.getsockname()[1]
+        self.cn_proxy_thread = threading.Thread(target=self.cn_proxy.serve_forever)
+        self.cn_proxy_thread.daemon = True
+        self.cn_proxy_thread.start()
+
+    def test_proxy(self):
+        cn_proxy = 'localhost:{0}'.format(self.cn_port)
+        ydl = YoutubeDL({
+            'proxy': 'localhost:{0}'.format(self.port),
+            'cn_verification_proxy': cn_proxy,
+        })
+        url = 'http://foo.com/bar'
+        response = ydl.urlopen(url).read().decode('utf-8')
+        self.assertEqual(response, 'normal: {0}'.format(url))
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Ytdl-request-proxy', cn_proxy)
+        response = ydl.urlopen(req).read().decode('utf-8')
+        self.assertEqual(response, 'cn: {0}'.format(url))
+
+
+if __name__ == '__main__':
+    unittest.main()
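TestProxy above pins down two layers of proxy selection: the 'proxy' option applies to every request, while a 'Ytdl-request-proxy' header set on an individual request (used internally for 'cn_verification_proxy') takes precedence for that request alone. A sketch of the per-request override, using only calls that appear in the test (the proxy addresses are placeholders):

    from youtube_dl import YoutubeDL
    from youtube_dl.compat import compat_urllib_request

    ydl = YoutubeDL({'proxy': 'localhost:3128'})  # default proxy (placeholder)
    req = compat_urllib_request.Request('http://example.com/')
    req.add_header('Ytdl-request-proxy', 'localhost:8118')  # overrides 'proxy' for this request
    body = ydl.urlopen(req).read()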
test/test_jsinterp.py (new file, 106 lines)
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.jsinterp import JSInterpreter
+
+
+class TestJSInterpreter(unittest.TestCase):
+    def test_basic(self):
+        jsi = JSInterpreter('function x(){;}')
+        self.assertEqual(jsi.call_function('x'), None)
+
+        jsi = JSInterpreter('function x3(){return 42;}')
+        self.assertEqual(jsi.call_function('x3'), 42)
+
+    def test_calc(self):
+        jsi = JSInterpreter('function x4(a){return 2*a+1;}')
+        self.assertEqual(jsi.call_function('x4', 3), 7)
+
+    def test_empty_return(self):
+        jsi = JSInterpreter('function f(){return; y()}')
+        self.assertEqual(jsi.call_function('f'), None)
+
+    def test_morespace(self):
+        jsi = JSInterpreter('function x (a) { return 2 * a + 1 ; }')
+        self.assertEqual(jsi.call_function('x', 3), 7)
+
+        jsi = JSInterpreter('function f () { x = 2 ; return x; }')
+        self.assertEqual(jsi.call_function('f'), 2)
+
+    def test_strange_chars(self):
+        jsi = JSInterpreter('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }')
+        self.assertEqual(jsi.call_function('$_xY1', 20), 21)
+
+    def test_operators(self):
+        jsi = JSInterpreter('function f(){return 1 << 5;}')
+        self.assertEqual(jsi.call_function('f'), 32)
+
+        jsi = JSInterpreter('function f(){return 19 & 21;}')
+        self.assertEqual(jsi.call_function('f'), 17)
+
+        jsi = JSInterpreter('function f(){return 11 >> 2;}')
+        self.assertEqual(jsi.call_function('f'), 2)
+
+    def test_array_access(self):
+        jsi = JSInterpreter('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2] = 7; return x;}')
+        self.assertEqual(jsi.call_function('f'), [5, 2, 7])
+
+    def test_parens(self):
+        jsi = JSInterpreter('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}')
+        self.assertEqual(jsi.call_function('f'), 7)
+
+        jsi = JSInterpreter('function f(){return (1 + 2) * 3;}')
+        self.assertEqual(jsi.call_function('f'), 9)
+
+    def test_assignments(self):
+        jsi = JSInterpreter('function f(){var x = 20; x = 30 + 1; return x;}')
+        self.assertEqual(jsi.call_function('f'), 31)
+
+        jsi = JSInterpreter('function f(){var x = 20; x += 30 + 1; return x;}')
+        self.assertEqual(jsi.call_function('f'), 51)
+
+        jsi = JSInterpreter('function f(){var x = 20; x -= 30 + 1; return x;}')
+        self.assertEqual(jsi.call_function('f'), -11)
+
+    def test_comments(self):
+        'Skipping: Not yet fully implemented'
+        return
+        jsi = JSInterpreter('''
+        function x() {
+            var x = /* 1 + */ 2;
+            var y = /* 30
+            * 40 */ 50;
+            return x + y;
+        }
+        ''')
+        self.assertEqual(jsi.call_function('x'), 52)
+
+        jsi = JSInterpreter('''
+        function f() {
+            var x = "/*";
+            var y = 1 /* comment */ + 2;
+            return y;
+        }
+        ''')
+        self.assertEqual(jsi.call_function('f'), 3)
+
+    def test_precedence(self):
+        jsi = JSInterpreter('''
+        function x() {
+            var a = [10, 20, 30, 40, 50];
+            var b = 6;
+            a[0]=a[b%a.length];
+            return a;
+        }''')
+        self.assertEqual(jsi.call_function('x'), [20, 20, 30, 40, 50])
+
+
+if __name__ == '__main__':
+    unittest.main()
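The JSInterpreter covered above is the small pure-Python JavaScript evaluator youtube-dl carries (used, among other things, for YouTube signature code); its whole surface in these tests is the constructor plus call_function. A usage sketch in the same style as the tests (the function body is arbitrary but stays within constructs the tests show to be supported: var, array literals, indexing and assignment):

    from youtube_dl.jsinterp import JSInterpreter

    jsi = JSInterpreter('function swap(a){var t = a[0]; a[0] = a[2]; a[2] = t; return a;}')
    print(jsi.call_function('swap', [1, 2, 3]))  # -> [3, 2, 1]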
test/test_netrc.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.extractor import (
+    gen_extractors,
+)
+
+
+class TestNetRc(unittest.TestCase):
+    def test_netrc_present(self):
+        for ie in gen_extractors():
+            if not hasattr(ie, '_login'):
+                continue
+            self.assertTrue(
+                hasattr(ie, '_NETRC_MACHINE'),
+                'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME)
+
+
+if __name__ == '__main__':
+    unittest.main()
test/test_playlists.py (deleted, 399 lines)
@@ -1,399 +0,0 @@
-#!/usr/bin/env python
-# encoding: utf-8
-
-from __future__ import unicode_literals
-
-# Allow direct execution
-import os
-import sys
-import unittest
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from test.helper import (
-    assertRegexpMatches,
-    expect_info_dict,
-    FakeYDL,
-)
-
-from youtube_dl.extractor import (
-    AcademicEarthCourseIE,
-    DailymotionPlaylistIE,
-    DailymotionUserIE,
-    VimeoChannelIE,
-    VimeoUserIE,
-    VimeoAlbumIE,
-    VimeoGroupsIE,
-    VineUserIE,
-    UstreamChannelIE,
-    SoundcloudSetIE,
-    SoundcloudUserIE,
-    SoundcloudPlaylistIE,
-    TeacherTubeUserIE,
-    LivestreamIE,
-    LivestreamOriginalIE,
-    NHLVideocenterIE,
-    BambuserChannelIE,
-    BandcampAlbumIE,
-    SmotriCommunityIE,
-    SmotriUserIE,
-    IviCompilationIE,
-    ImdbListIE,
-    KhanAcademyIE,
-    EveryonesMixtapeIE,
-    RutubeChannelIE,
-    RutubePersonIE,
-    GoogleSearchIE,
-    GenericIE,
-    TEDIE,
-    ToypicsUserIE,
-    XTubeUserIE,
-    InstagramUserIE,
-    CSpanIE,
-    AolIE,
-)
-
-
-class TestPlaylists(unittest.TestCase):
-    def assertIsPlaylist(self, info):
-        """Make sure the info has '_type' set to 'playlist'"""
-        self.assertEqual(info['_type'], 'playlist')
-
-    def test_dailymotion_playlist(self):
-        dl = FakeYDL()
-        ie = DailymotionPlaylistIE(dl)
-        result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'SPORT')
-        self.assertTrue(len(result['entries']) > 20)
-
-    def test_dailymotion_user(self):
-        dl = FakeYDL()
-        ie = DailymotionUserIE(dl)
-        result = ie.extract('https://www.dailymotion.com/user/nqtv')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Rémi Gaillard')
-        self.assertTrue(len(result['entries']) >= 100)
-
-    def test_vimeo_channel(self):
-        dl = FakeYDL()
-        ie = VimeoChannelIE(dl)
-        result = ie.extract('http://vimeo.com/channels/tributes')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Vimeo Tributes')
-        self.assertTrue(len(result['entries']) > 24)
-
-    def test_vimeo_user(self):
-        dl = FakeYDL()
-        ie = VimeoUserIE(dl)
-        result = ie.extract('http://vimeo.com/nkistudio/videos')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Nki')
-        self.assertTrue(len(result['entries']) > 65)
-
-    def test_vimeo_album(self):
-        dl = FakeYDL()
-        ie = VimeoAlbumIE(dl)
-        result = ie.extract('http://vimeo.com/album/2632481')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Staff Favorites: November 2013')
-        self.assertTrue(len(result['entries']) > 12)
-
-    def test_vimeo_groups(self):
-        dl = FakeYDL()
-        ie = VimeoGroupsIE(dl)
-        result = ie.extract('http://vimeo.com/groups/rolexawards')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
-        self.assertTrue(len(result['entries']) > 72)
-
-    def test_vine_user(self):
-        dl = FakeYDL()
-        ie = VineUserIE(dl)
-        result = ie.extract('https://vine.co/Visa')
-        self.assertIsPlaylist(result)
-        self.assertTrue(len(result['entries']) >= 50)
-
-    def test_ustream_channel(self):
-        dl = FakeYDL()
-        ie = UstreamChannelIE(dl)
-        result = ie.extract('http://www.ustream.tv/channel/channeljapan')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '10874166')
-        self.assertTrue(len(result['entries']) >= 54)
-
-    def test_soundcloud_set(self):
-        dl = FakeYDL()
-        ie = SoundcloudSetIE(dl)
-        result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'The Royal Concept EP')
-        self.assertTrue(len(result['entries']) >= 6)
-
-    def test_soundcloud_user(self):
-        dl = FakeYDL()
-        ie = SoundcloudUserIE(dl)
-        result = ie.extract('https://soundcloud.com/the-concept-band')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '9615865')
-        self.assertTrue(len(result['entries']) >= 12)
-
-    def test_soundcloud_likes(self):
-        dl = FakeYDL()
-        ie = SoundcloudUserIE(dl)
-        result = ie.extract('https://soundcloud.com/the-concept-band/likes')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '9615865')
-        self.assertTrue(len(result['entries']) >= 1)
-
-    def test_soundcloud_playlist(self):
-        dl = FakeYDL()
-        ie = SoundcloudPlaylistIE(dl)
-        result = ie.extract('http://api.soundcloud.com/playlists/4110309')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '4110309')
-        self.assertEqual(result['title'], 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]')
-        assertRegexpMatches(
-            self, result['description'], r'TILT Brass - Bowery Poetry Club')
-        self.assertEqual(len(result['entries']), 6)
-
-    def test_livestream_event(self):
-        dl = FakeYDL()
-        ie = LivestreamIE(dl)
-        result = ie.extract('http://new.livestream.com/tedx/cityenglish')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'TEDCity2.0 (English)')
-        self.assertTrue(len(result['entries']) >= 4)
-
-    def test_livestreamoriginal_folder(self):
-        dl = FakeYDL()
-        ie = LivestreamOriginalIE(dl)
-        result = ie.extract('https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'a07bf706-d0e4-4e75-a747-b021d84f2fd3')
-        self.assertTrue(len(result['entries']) >= 28)
-
-    def test_nhl_videocenter(self):
-        dl = FakeYDL()
-        ie = NHLVideocenterIE(dl)
-        result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '999')
-        self.assertEqual(result['title'], 'Highlights')
-        self.assertEqual(len(result['entries']), 12)
-
-    def test_bambuser_channel(self):
-        dl = FakeYDL()
-        ie = BambuserChannelIE(dl)
-        result = ie.extract('http://bambuser.com/channel/pixelversity')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'pixelversity')
-        self.assertTrue(len(result['entries']) >= 60)
-
-    def test_bandcamp_album(self):
-        dl = FakeYDL()
-        ie = BandcampAlbumIE(dl)
-        result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Nightmare Night EP')
-        self.assertTrue(len(result['entries']) >= 4)
-
-    def test_smotri_community(self):
-        dl = FakeYDL()
-        ie = SmotriCommunityIE(dl)
-        result = ie.extract('http://smotri.com/community/video/kommuna')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'kommuna')
-        self.assertEqual(result['title'], 'КПРФ')
-        self.assertTrue(len(result['entries']) >= 4)
-
-    def test_smotri_user(self):
-        dl = FakeYDL()
-        ie = SmotriUserIE(dl)
-        result = ie.extract('http://smotri.com/user/inspector')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'inspector')
-        self.assertEqual(result['title'], 'Inspector')
-        self.assertTrue(len(result['entries']) >= 9)
-
-    def test_AcademicEarthCourse(self):
-        dl = FakeYDL()
-        ie = AcademicEarthCourseIE(dl)
-        result = ie.extract('http://academicearth.org/playlists/laws-of-nature/')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'laws-of-nature')
-        self.assertEqual(result['title'], 'Laws of Nature')
-        self.assertEqual(result['description'],u'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.')# u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
-        self.assertEqual(len(result['entries']), 4)
-
-    def test_ivi_compilation(self):
-        dl = FakeYDL()
-        ie = IviCompilationIE(dl)
-        result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'dvoe_iz_lartsa')
-        self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008)')
-        self.assertTrue(len(result['entries']) >= 24)
-
-    def test_ivi_compilation_season(self):
-        dl = FakeYDL()
-        ie = IviCompilationIE(dl)
-        result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa/season1')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'dvoe_iz_lartsa/season1')
-        self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008) 1 сезон')
-        self.assertTrue(len(result['entries']) >= 12)
-
-    def test_imdb_list(self):
-        dl = FakeYDL()
-        ie = ImdbListIE(dl)
-        result = ie.extract('http://www.imdb.com/list/JFs9NWw6XI0')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'JFs9NWw6XI0')
-        self.assertEqual(result['title'], 'March 23, 2012 Releases')
-        self.assertEqual(len(result['entries']), 7)
-
-    def test_khanacademy_topic(self):
-        dl = FakeYDL()
-        ie = KhanAcademyIE(dl)
-        result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'cryptography')
-        self.assertEqual(result['title'], 'Journey into cryptography')
-        self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
-        self.assertTrue(len(result['entries']) >= 3)
-
-    def test_EveryonesMixtape(self):
-        dl = FakeYDL()
-        ie = EveryonesMixtapeIE(dl)
-        result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'm7m0jJAbMQi')
-        self.assertEqual(result['title'], 'Driving')
-        self.assertEqual(len(result['entries']), 24)
-
-    def test_rutube_channel(self):
-        dl = FakeYDL()
-        ie = RutubeChannelIE(dl)
-        result = ie.extract('http://rutube.ru/tags/video/1800/')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '1800')
-        self.assertTrue(len(result['entries']) >= 68)
-
-    def test_rutube_person(self):
-        dl = FakeYDL()
-        ie = RutubePersonIE(dl)
-        result = ie.extract('http://rutube.ru/video/person/313878/')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], '313878')
-        self.assertTrue(len(result['entries']) >= 37)
-
-    def test_multiple_brightcove_videos(self):
-        # https://github.com/rg3/youtube-dl/issues/2283
-        dl = FakeYDL()
-        ie = GenericIE(dl)
-        result = ie.extract('http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'always-never-nuclear-command-and-control')
-        self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
|
|
||||||
self.assertEqual(len(result['entries']), 3)
|
|
||||||
|
|
||||||
def test_GoogleSearch(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = GoogleSearchIE(dl)
|
|
||||||
result = ie.extract('gvsearch15:python language')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], 'python language')
|
|
||||||
self.assertEqual(result['title'], 'python language')
|
|
||||||
self.assertEqual(len(result['entries']), 15)
|
|
||||||
|
|
||||||
def test_generic_rss_feed(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = GenericIE(dl)
|
|
||||||
result = ie.extract('http://phihag.de/2014/youtube-dl/rss.xml')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], 'http://phihag.de/2014/youtube-dl/rss.xml')
|
|
||||||
self.assertEqual(result['title'], 'Zero Punctuation')
|
|
||||||
self.assertTrue(len(result['entries']) > 10)
|
|
||||||
|
|
||||||
def test_ted_playlist(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = TEDIE(dl)
|
|
||||||
result = ie.extract('http://www.ted.com/playlists/who_are_the_hackers')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], '10')
|
|
||||||
self.assertEqual(result['title'], 'Who are the hackers?')
|
|
||||||
self.assertTrue(len(result['entries']) >= 6)
|
|
||||||
|
|
||||||
def test_toypics_user(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = ToypicsUserIE(dl)
|
|
||||||
result = ie.extract('http://videos.toypics.net/Mikey')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], 'Mikey')
|
|
||||||
self.assertTrue(len(result['entries']) >= 17)
|
|
||||||
|
|
||||||
def test_xtube_user(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = XTubeUserIE(dl)
|
|
||||||
result = ie.extract('http://www.xtube.com/community/profile.php?user=greenshowers')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], 'greenshowers')
|
|
||||||
self.assertTrue(len(result['entries']) >= 155)
|
|
||||||
|
|
||||||
def test_InstagramUser(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = InstagramUserIE(dl)
|
|
||||||
result = ie.extract('http://instagram.com/porsche')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], 'porsche')
|
|
||||||
self.assertTrue(len(result['entries']) >= 2)
|
|
||||||
test_video = next(
|
|
||||||
e for e in result['entries']
|
|
||||||
if e['id'] == '614605558512799803_462752227')
|
|
||||||
dl.add_default_extra_info(test_video, ie, '(irrelevant URL)')
|
|
||||||
dl.process_video_result(test_video, download=False)
|
|
||||||
EXPECTED = {
|
|
||||||
'id': '614605558512799803_462752227',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': '#Porsche Intelligent Performance.',
|
|
||||||
'thumbnail': 're:^https?://.*\.jpg',
|
|
||||||
'uploader': 'Porsche',
|
|
||||||
'uploader_id': 'porsche',
|
|
||||||
'timestamp': 1387486713,
|
|
||||||
'upload_date': '20131219',
|
|
||||||
}
|
|
||||||
expect_info_dict(self, EXPECTED, test_video)
|
|
||||||
|
|
||||||
def test_CSpan_playlist(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = CSpanIE(dl)
|
|
||||||
result = ie.extract(
|
|
||||||
'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], '342759')
|
|
||||||
self.assertEqual(
|
|
||||||
result['title'], 'General Motors Ignition Switch Recall')
|
|
||||||
whole_duration = sum(e['duration'] for e in result['entries'])
|
|
||||||
self.assertEqual(whole_duration, 14855)
|
|
||||||
|
|
||||||
def test_aol_playlist(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = AolIE(dl)
|
|
||||||
result = ie.extract(
|
|
||||||
'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], '152147')
|
|
||||||
self.assertEqual(
|
|
||||||
result['title'], 'Brace Yourself - Today\'s Weirdest News')
|
|
||||||
self.assertTrue(len(result['entries']) >= 10)
|
|
||||||
|
|
||||||
def test_TeacherTubeUser(self):
|
|
||||||
dl = FakeYDL()
|
|
||||||
ie = TeacherTubeUserIE(dl)
|
|
||||||
result = ie.extract('http://www.teachertube.com/user/profile/rbhagwati2')
|
|
||||||
self.assertIsPlaylist(result)
|
|
||||||
self.assertEqual(result['id'], 'rbhagwati2')
|
|
||||||
self.assertTrue(len(result['entries']) >= 179)
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
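These playlist checks all lean on a small helper from the test suite. A sketch of what assertIsPlaylist presumably verifies (an illustration of the convention, not the exact source):

    def assertIsPlaylist(self, info):
        # A playlist result is an info dict whose '_type' is 'playlist'
        # and whose 'entries' holds the individual videos.
        self.assertEqual(info['_type'], 'playlist')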
test/test_postprocessors.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.postprocessor import MetadataFromTitlePP
+
+
+class TestMetadataFromTitle(unittest.TestCase):
+    def test_format_to_regex(self):
+        pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
+        self.assertEqual(pp._titleregex, '(?P<title>.+)\ \-\ (?P<artist>.+)')
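For orientation, the conversion this test pins down can be sketched as follows; this is a hedged illustration, not the actual MetadataFromTitlePP code. It escapes the literal parts of the format string and turns each %(field)s placeholder into a named capturing group. The `\ \-\ ` in the expected value comes from Python 2's re.escape, which escapes spaces and dashes; modern Python 3 leaves them alone, so the output there differs.

    import re

    def format_to_regex(fmt):
        # Escape literal text between placeholders and replace each
        # %(field)s placeholder with a named capturing group.
        regex = ''
        lastpos = 0
        for match in re.finditer(r'%\((\w+)\)s', fmt):
            regex += re.escape(fmt[lastpos:match.start()])
            regex += '(?P<%s>.+)' % match.group(1)
            lastpos = match.end()
        regex += re.escape(fmt[lastpos:])
        return regex

    print(format_to_regex('%(title)s - %(artist)s'))
    # Python 2: (?P<title>.+)\ \-\ (?P<artist>.+)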
test/test_subtitles.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals

 # Allow direct execution
 import os
@@ -15,50 +16,58 @@ from youtube_dl.extractor import (
     DailymotionIE,
     TEDIE,
     VimeoIE,
+    WallaIE,
+    CeskaTelevizeIE,
+    LyndaIE,
+    NPOIE,
+    ComedyCentralIE,
+    NRKTVIE,
+    RaiIE,
+    VikiIE,
+    ThePlatformIE,
+    RTVEALaCartaIE,
+    FunnyOrDieIE,
 )


 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None

     def setUp(self):
         self.DL = FakeYDL()
-        self.ie = self.IE(self.DL)
+        self.ie = self.IE()
+        self.DL.add_info_extractor(self.ie)

     def getInfoDict(self):
-        info_dict = self.ie.extract(self.url)
+        info_dict = self.DL.extract_info(self.url, download=False)
         return info_dict

     def getSubtitles(self):
         info_dict = self.getInfoDict()
-        return info_dict['subtitles']
+        subtitles = info_dict['requested_subtitles']
+        if not subtitles:
+            return subtitles
+        for sub_info in subtitles.values():
+            if sub_info.get('data') is None:
+                uf = self.DL.urlopen(sub_info['url'])
+                sub_info['data'] = uf.read().decode('utf-8')
+        return dict((l, sub_info['data']) for l, sub_info in subtitles.items())


 class TestYoutubeSubtitles(BaseTestSubtitles):
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE

-    def test_youtube_no_writesubtitles(self):
-        self.DL.params['writesubtitles'] = False
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_youtube_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
-
-    def test_youtube_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['it']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
-
     def test_youtube_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(len(subtitles.keys()), 13)
+        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
+        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
+        for lang in ['it', 'fr', 'de']:
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)

     def test_youtube_subtitles_sbv_format(self):
         self.DL.params['writesubtitles'] = True
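The refactored helper documents the new data contract: extraction now exposes a 'requested_subtitles' mapping, where each language entry carries either inline 'data' or a 'url' that the helper downloads lazily. Roughly this shape (a sketch; the example URL and cue text are made up):

    # Sketch of info_dict['requested_subtitles'] as the helper above consumes it:
    requested_subtitles = {
        'en': {'ext': 'vtt', 'url': 'https://example.com/subs.en.vtt'},             # fetched on demand
        'it': {'ext': 'srt', 'data': '1\n00:00:01,000 --> 00:00:02,000\nCiao\n'},   # already inline
    }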
@@ -72,12 +81,6 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         subtitles = self.getSubtitles()
         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')

-    def test_youtube_list_subtitles(self):
-        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
     def test_youtube_automatic_captions(self):
         self.url = '8YoUxe5ncPo'
         self.DL.params['writeautomaticsub'] = True
@@ -85,138 +88,66 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         subtitles = self.getSubtitles()
         self.assertTrue(subtitles['it'] is not None)

+    def test_youtube_translated_subtitles(self):
+        # This video has a subtitles track, which can be translated
+        self.url = 'Ky9eprVWzlI'
+        self.DL.params['writeautomaticsub'] = True
+        self.DL.params['subtitleslangs'] = ['it']
+        subtitles = self.getSubtitles()
+        self.assertTrue(subtitles['it'] is not None)
+
     def test_youtube_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
-        self.url = 'sAjKT8FhjI8'
+        self.DL.expect_warning('video doesn\'t have subtitles')
+        self.url = 'n5BB19UTcdA'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
-
-    def test_youtube_multiple_langs(self):
-        self.url = 'QRS8MkLhQmM'
-        self.DL.params['writesubtitles'] = True
-        langs = ['it', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+        self.assertFalse(subtitles)


 class TestDailymotionSubtitles(BaseTestSubtitles):
     url = 'http://www.dailymotion.com/video/xczg00'
     IE = DailymotionIE

-    def test_no_writesubtitles(self):
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
-
-    def test_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['fr']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
-
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles.keys()), 5)
-
-    def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
-    def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslang'] = ['en']
-        subtitles = self.getSubtitles()
-        self.assertTrue(len(subtitles.keys()) == 0)
+        self.assertTrue(len(subtitles.keys()) >= 6)
+        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
+        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
+        for lang in ['es', 'fr', 'de']:
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)

     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
-
-    def test_multiple_langs(self):
-        self.DL.params['writesubtitles'] = True
-        langs = ['es', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+        self.assertFalse(subtitles)


 class TestTedSubtitles(BaseTestSubtitles):
     url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
     IE = TEDIE

-    def test_no_writesubtitles(self):
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
-
-    def test_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['fr']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
-
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) >= 28)
+        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
+        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
+        for lang in ['es', 'fr', 'de']:
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
-
-    def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
-    def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslang'] = ['en']
-        subtitles = self.getSubtitles()
-        self.assertTrue(len(subtitles.keys()) == 0)
-
-    def test_multiple_langs(self):
-        self.DL.params['writesubtitles'] = True
-        langs = ['es', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)


 class TestBlipTVSubtitles(BaseTestSubtitles):
     url = 'http://blip.tv/a/a-6603250'
     IE = BlipTVIE

-    def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -228,55 +159,178 @@ class TestVimeoSubtitles(BaseTestSubtitles):
     url = 'http://vimeo.com/76979871'
     IE = VimeoIE

-    def test_no_writesubtitles(self):
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
-
-    def test_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['fr']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
-
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
+        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
+        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
-
-    def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
-    def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslang'] = ['en']
-        subtitles = self.getSubtitles()
-        self.assertTrue(len(subtitles.keys()) == 0)

     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vimeo.com/56015672'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
-
-    def test_multiple_langs(self):
-        self.DL.params['writesubtitles'] = True
-        langs = ['es', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+        self.assertFalse(subtitles)
+
+
+class TestWallaSubtitles(BaseTestSubtitles):
+    url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
+    IE = WallaIE
+
+    def test_allsubtitles(self):
+        self.DL.expect_warning('Automatic Captions not supported by this server')
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['heb']))
+        self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
+
+    def test_nosubtitles(self):
+        self.DL.expect_warning('video doesn\'t have subtitles')
+        self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertFalse(subtitles)
+
+
+class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
+    url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
+    IE = CeskaTelevizeIE
+
+    def test_allsubtitles(self):
+        self.DL.expect_warning('Automatic Captions not supported by this server')
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['cs']))
+        self.assertTrue(len(subtitles['cs']) > 20000)
+
+    def test_nosubtitles(self):
+        self.DL.expect_warning('video doesn\'t have subtitles')
+        self.url = 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertFalse(subtitles)
+
+
+class TestLyndaSubtitles(BaseTestSubtitles):
+    url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
+    IE = LyndaIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
+
+
+class TestNPOSubtitles(BaseTestSubtitles):
+    url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
+    IE = NPOIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['nl']))
+        self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
+
+
+class TestMTVSubtitles(BaseTestSubtitles):
+    url = 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother'
+    IE = ComedyCentralIE
+
+    def getInfoDict(self):
+        return super(TestMTVSubtitles, self).getInfoDict()['entries'][0]
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'b9f6ca22a6acf597ec76f61749765e65')
+
+
+class TestNRKSubtitles(BaseTestSubtitles):
+    url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
+    IE = NRKTVIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['no']))
+        self.assertEqual(md5(subtitles['no']), '1d221e6458c95c5494dcd38e6a1f129a')
+
+
+class TestRaiSubtitles(BaseTestSubtitles):
+    url = 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
+    IE = RaiIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['it']))
+        self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
+
+
+class TestVikiSubtitles(BaseTestSubtitles):
+    url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
+    IE = VikiIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a')
+
+
+class TestThePlatformSubtitles(BaseTestSubtitles):
+    # from http://www.3playmedia.com/services-features/tools/integrations/theplatform/
+    # (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/)
+    url = 'theplatform:JFUjUE1_ehvq'
+    IE = ThePlatformIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
+
+
+class TestRtveSubtitles(BaseTestSubtitles):
+    url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
+    IE = RTVEALaCartaIE
+
+    def test_allsubtitles(self):
+        print('Skipping, only available from Spain')
+        return
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['es']))
+        self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')
+
+
+class TestFunnyOrDieSubtitles(BaseTestSubtitles):
+    url = 'http://www.funnyordie.com/videos/224829ff6d/judd-apatow-will-direct-your-vine'
+    IE = FunnyOrDieIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')


 if __name__ == '__main__':
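Outside the test harness, the same knobs drive a normal YoutubeDL run. A hedged sketch (the video ID is the one the tests use above; 'skip_download' just avoids fetching the media itself):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({
        'writesubtitles': True,    # same param the tests set (--write-sub)
        'subtitleslangs': ['it'],  # --sub-lang it
        'skip_download': True,
    })
    info = ydl.extract_info('QRS8MkLhQmM', download=False)
    print(sorted(info.get('requested_subtitles') or {}))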
test/test_swfinterp.py (new file, 80 lines)
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+import errno
+import io
+import json
+import re
+import subprocess
+
+from youtube_dl.swfinterp import SWFInterpreter
+
+
+TEST_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), 'swftests')
+
+
+class TestSWFInterpreter(unittest.TestCase):
+    pass
+
+
+def _make_testfunc(testfile):
+    m = re.match(r'^(.*)\.(as)$', testfile)
+    if not m:
+        return
+    test_id = m.group(1)
+
+    def test_func(self):
+        as_file = os.path.join(TEST_DIR, testfile)
+        swf_file = os.path.join(TEST_DIR, test_id + '.swf')
+        if ((not os.path.exists(swf_file)) or
+                os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
+            # Recompile
+            try:
+                subprocess.check_call([
+                    'mxmlc', '-output', swf_file,
+                    '-static-link-runtime-shared-libraries', as_file])
+            except OSError as ose:
+                if ose.errno == errno.ENOENT:
+                    print('mxmlc not found! Skipping test.')
+                    return
+                raise
+
+        with open(swf_file, 'rb') as swf_f:
+            swf_content = swf_f.read()
+        swfi = SWFInterpreter(swf_content)
+
+        with io.open(as_file, 'r', encoding='utf-8') as as_f:
+            as_content = as_f.read()
+
+        def _find_spec(key):
+            m = re.search(
+                r'(?m)^//\s*%s:\s*(.*?)\n' % re.escape(key), as_content)
+            if not m:
+                raise ValueError('Cannot find %s in %s' % (key, testfile))
+            return json.loads(m.group(1))
+
+        input_args = _find_spec('input')
+        output = _find_spec('output')
+
+        swf_class = swfi.extract_class(test_id)
+        func = swfi.extract_function(swf_class, 'main')
+        res = func(input_args)
+        self.assertEqual(res, output)
+
+    test_func.__name__ = str('test_swf_' + test_id)
+    setattr(TestSWFInterpreter, test_func.__name__, test_func)
+
+
+for testfile in os.listdir(TEST_DIR):
+    _make_testfunc(testfile)
+
+if __name__ == '__main__':
+    unittest.main()
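The generator above derives one test method per ActionScript file and reads the expected values from structured "// key: <json>" comments. A hypothetical spec header and the parsing _find_spec performs (the contents of as_content are invented for illustration):

    import json
    import re

    # A hypothetical swftests/*.as header; the ActionScript body is elided.
    as_content = '// input: [1, 7]\n// output: 8\n'

    def find_spec(key, content):
        # Same pattern the test uses: one "// key: <json>" comment per line.
        m = re.search(r'(?m)^//\s*%s:\s*(.*?)\n' % re.escape(key), content)
        return json.loads(m.group(1))

    print(find_spec('input', as_content))   # [1, 7]
    print(find_spec('output', as_content))  # 8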
test/test_unicode_literals.py
@@ -1,23 +1,38 @@
 from __future__ import unicode_literals

-import io
+# Allow direct execution
 import os
-import re
+import sys
 import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+import io
+import re

 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

 IGNORED_FILES = [
     'setup.py',  # http://bugs.python.org/issue13943
+    'conf.py',
+    'buildserver.py',
 ]
+
+IGNORED_DIRS = [
+    '.git',
+    '.tox',
+]
+
+from test.helper import assertRegexpMatches


 class TestUnicodeLiterals(unittest.TestCase):
     def test_all_files(self):
-        print('Skipping this test (not yet fully implemented)')
-        return
-
-        for dirpath, _, filenames in os.walk(rootDir):
+        for dirpath, dirnames, filenames in os.walk(rootDir):
+            for ignore_dir in IGNORED_DIRS:
+                if ignore_dir in dirnames:
+                    # If we remove the directory from dirnames os.walk won't
+                    # recurse into it
+                    dirnames.remove(ignore_dir)
             for basename in filenames:
                 if not basename.endswith('.py'):
                     continue
@@ -30,10 +45,11 @@ class TestUnicodeLiterals(unittest.TestCase):

             if "'" not in code and '"' not in code:
                 continue
-            imps = 'from __future__ import unicode_literals'
-            self.assertTrue(
-                imps in code,
-                ' %s missing in %s' % (imps, fn))
+            assertRegexpMatches(
+                self,
+                code,
+                r'(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals',
+                'unicode_literals import missing in %s' % fn)

             m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
             if m is not None:
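The new pattern is stricter than the old substring check: the future import must sit at the top of the file, preceded only by comments or blank lines, while other __future__ features may share the line. A quick demonstration (the sample file contents are made up):

    import re

    PATTERN = r'(?:(?:#.*?|\s*)\n)*from __future__ import (?:[a-z_]+,\s*)*unicode_literals'

    good = "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import division, unicode_literals\n"
    bad = "import os\nfrom __future__ import unicode_literals\n"

    print(bool(re.match(PATTERN, good)))  # True: only comments precede the import
    print(bool(re.match(PATTERN, bad)))   # False: code precedes the import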
test/test_utils.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 # coding: utf-8

+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -13,39 +15,51 @@ import io
 import json
 import xml.etree.ElementTree

-#from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import (
+    age_restricted,
+    args_to_str,
+    clean_html,
     DateRange,
+    detect_exe_version,
     encodeFilename,
+    escape_rfc3986,
+    escape_url,
+    ExtractorError,
     find_xpath_attr,
     fix_xml_ampersands,
-    get_meta_content,
+    InAdvancePagedList,
+    intlist_to_bytes,
+    is_html,
+    js_to_json,
+    limit_length,
+    OnDemandPagedList,
     orderedSet,
-    PagedList,
     parse_duration,
+    parse_filesize,
+    parse_iso8601,
     read_batch_urls,
     sanitize_filename,
+    sanitize_path,
+    sanitize_url_path_consecutive_slashes,
     shell_quote,
     smuggle_url,
     str_to_int,
+    strip_jsonp,
     struct_unpack,
     timeconvert,
     unescapeHTML,
     unified_strdate,
     unsmuggle_url,
+    uppercase_escape,
     url_basename,
     urlencode_postdata,
+    version_tuple,
     xpath_with_ns,
-    parse_iso8601,
-    strip_jsonp,
-    uppercase_escape,
+    xpath_text,
+    render_table,
+    match_str,
 )

-if sys.version_info < (3, 0):
-    _compat_str = lambda b: b.decode('unicode-escape')
-else:
-    _compat_str = lambda s: s


 class TestUtil(unittest.TestCase):
     def test_timeconvert(self):
@@ -67,11 +81,20 @@ class TestUtil(unittest.TestCase):
         self.assertEqual('this - that', sanitize_filename('this: that'))

         self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
-        aumlaut = _compat_str('\xe4')
+        aumlaut = 'ä'
         self.assertEqual(sanitize_filename(aumlaut), aumlaut)
-        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
+        tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
         self.assertEqual(sanitize_filename(tests), tests)

+        self.assertEqual(
+            sanitize_filename('New World record at 0:12:34'),
+            'New World record at 0_12_34')
+
+        self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
+        self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
+        self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
+        self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
+
         forbidden = '"\0\\/'
         for fc in forbidden:
             for fbc in forbidden:
@@ -91,9 +114,9 @@ class TestUtil(unittest.TestCase):
         self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
         self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))

-        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
+        tests = 'a\xe4b\u4e2d\u56fd\u7684c'
         self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
-        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '')  # No empty filename
+        self.assertTrue(sanitize_filename('\xf6', restricted=True) != '')  # No empty filename

         forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
         for fc in forbidden:
@@ -101,8 +124,8 @@ class TestUtil(unittest.TestCase):
                 self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))

         # Handle a common case more neatly
-        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
-        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
+        self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
+        self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
         # .. but make sure the file name is never empty
         self.assertTrue(sanitize_filename('-', restricted=True) != '')
         self.assertTrue(sanitize_filename(':', restricted=True) != '')
@@ -112,6 +135,62 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
         self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')

+    def test_sanitize_path(self):
+        if sys.platform != 'win32':
+            return
+
+        self.assertEqual(sanitize_path('abc'), 'abc')
+        self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
+        self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
+        self.assertEqual(sanitize_path('abc|def'), 'abc#def')
+        self.assertEqual(sanitize_path('<>:"|?*'), '#######')
+        self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
+        self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
+
+        self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
+        self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
+
+        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
+        self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
+        self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
+        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
+
+        self.assertEqual(
+            sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
+            'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
+
+        self.assertEqual(
+            sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
+            'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
+        self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
+        self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
+        self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
+
+        self.assertEqual(sanitize_path('../abc'), '..\\abc')
+        self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
+        self.assertEqual(sanitize_path('./abc'), 'abc')
+        self.assertEqual(sanitize_path('./../abc'), '..\\abc')
+
+    def test_sanitize_url_path_consecutive_slashes(self):
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/foo//bar/filename.html'),
+            'http://hostname/foo/bar/filename.html')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname//foo/bar/filename.html'),
+            'http://hostname/foo/bar/filename.html')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname//'),
+            'http://hostname/')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/foo/bar/filename.html'),
+            'http://hostname/foo/bar/filename.html')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/'),
+            'http://hostname/')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/abc//'),
+            'http://hostname/abc/')
+
     def test_ordered_set(self):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
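The consecutive-slash helper added here is easy to approximate with the standard library: collapse duplicate slashes in the path component while leaving scheme, host, query, and fragment alone. A sketch under those assumptions, not the youtube-dl implementation (the function name is mine):

    import re
    try:
        from urllib.parse import urlparse, urlunparse  # Python 3
    except ImportError:
        from urlparse import urlparse, urlunparse  # Python 2

    def collapse_path_slashes(url):
        # Only the path component is rewritten; everything else stays intact.
        parts = urlparse(url)
        return urlunparse(parts._replace(path=re.sub(r'/{2,}', '/', parts.path)))

    print(collapse_path_slashes('http://hostname/foo//bar/filename.html'))
    # http://hostname/foo/bar/filename.html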
@@ -120,7 +199,11 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

     def test_unescape_html(self):
-        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
+        self.assertEqual(unescapeHTML('%20;'), '%20;')
+        self.assertEqual(unescapeHTML('&#x2F;'), '/')
+        self.assertEqual(unescapeHTML('&#47;'), '/')
+        self.assertEqual(
+            unescapeHTML('&eacute;'), 'é')

     def test_daterange(self):
         _20century = DateRange("19000101", "20000101")
@@ -135,10 +218,18 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('8/7/2009'), '20090708')
         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
+        self.assertEqual(unified_strdate('1968 12 10'), '19681210')
         self.assertEqual(unified_strdate('1968-12-10'), '19681210')
+        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
+        self.assertEqual(
+            unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
+            '20141126')
+        self.assertEqual(
+            unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
+            '20150202')

     def test_find_xpath_attr(self):
-        testxml = u'''<root>
+        testxml = '''<root>
             <node/>
             <node x="a"/>
             <node x="a" y="c" />
@@ -150,19 +241,8 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
         self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])

-    def test_meta_parser(self):
-        testhtml = u'''
-        <head>
-            <meta name="description" content="foo &amp; bar">
-            <meta content='Plato' name='author'/>
-        </head>
-        '''
-        get_meta = lambda name: get_meta_content(name, testhtml)
-        self.assertEqual(get_meta('description'), u'foo & bar')
-        self.assertEqual(get_meta('author'), 'Plato')
-
     def test_xpath_with_ns(self):
-        testxml = u'''<root xmlns:media="http://example.com/">
+        testxml = '''<root xmlns:media="http://example.com/">
             <media:song>
                 <media:author>The Author</media:author>
                 <url>http://server.com/download.mp3</url>
@@ -171,11 +251,22 @@ class TestUtil(unittest.TestCase):
         doc = xml.etree.ElementTree.fromstring(testxml)
         find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
         self.assertTrue(find('media:song') is not None)
-        self.assertEqual(find('media:song/media:author').text, u'The Author')
-        self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
+        self.assertEqual(find('media:song/media:author').text, 'The Author')
+        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
+
+    def test_xpath_text(self):
+        testxml = '''<root>
+            <div>
+                <p>Foo</p>
+            </div>
+        </root>'''
+        doc = xml.etree.ElementTree.fromstring(testxml)
+        self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
+        self.assertTrue(xpath_text(doc, 'div/bar') is None)
+        self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)

     def test_smuggle_url(self):
-        data = {u"ö": u"ö", u"abc": [3]}
+        data = {"ö": "ö", "abc": [3]}
         url = 'https://foo.bar/baz?x=y#a'
         smug_url = smuggle_url(url, data)
         unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@@ -187,25 +278,27 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(res_data, None)

     def test_shell_quote(self):
-        args = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')]
-        self.assertEqual(shell_quote(args), u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""")
+        args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
+        self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")

     def test_str_to_int(self):
         self.assertEqual(str_to_int('123,456'), 123456)
         self.assertEqual(str_to_int('123.456'), 123456)

     def test_url_basename(self):
-        self.assertEqual(url_basename(u'http://foo.de/'), u'')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz'), u'baz')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz?x=y'), u'baz')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz#x=y'), u'baz')
-        self.assertEqual(url_basename(u'http://foo.de/bar/baz/'), u'baz')
+        self.assertEqual(url_basename('http://foo.de/'), '')
+        self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
+        self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
+        self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
+        self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
         self.assertEqual(
-            url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
-            u'trailer.mp4')
+            url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
+            'trailer.mp4')

     def test_parse_duration(self):
         self.assertEqual(parse_duration(None), None)
+        self.assertEqual(parse_duration(False), None)
+        self.assertEqual(parse_duration('invalid'), None)
         self.assertEqual(parse_duration('1'), 1)
         self.assertEqual(parse_duration('1337:12'), 80232)
         self.assertEqual(parse_duration('9:12:43'), 33163)
@@ -213,12 +306,23 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_duration('00:01:01'), 61)
         self.assertEqual(parse_duration('x:y'), None)
         self.assertEqual(parse_duration('3h11m53s'), 11513)
+        self.assertEqual(parse_duration('3h 11m 53s'), 11513)
+        self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
+        self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
         self.assertEqual(parse_duration('62m45s'), 3765)
         self.assertEqual(parse_duration('6m59s'), 419)
         self.assertEqual(parse_duration('49s'), 49)
         self.assertEqual(parse_duration('0h0m0s'), 0)
         self.assertEqual(parse_duration('0m0s'), 0)
         self.assertEqual(parse_duration('0s'), 0)
+        self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
+        self.assertEqual(parse_duration('T30M38S'), 1838)
+        self.assertEqual(parse_duration('5 s'), 5)
+        self.assertEqual(parse_duration('3 min'), 180)
+        self.assertEqual(parse_duration('2.5 hours'), 9000)
+        self.assertEqual(parse_duration('02:03:04'), 7384)
+        self.assertEqual(parse_duration('01:02:03:04'), 93784)
+        self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)

     def test_fix_xml_ampersands(self):
         self.assertEqual(
@@ -241,10 +345,14 @@ class TestUtil(unittest.TestCase):
                 for i in range(firstid, upto):
                     yield i

-            pl = PagedList(get_page, pagesize)
+            pl = OnDemandPagedList(get_page, pagesize)
             got = pl.getslice(*sliceargs)
             self.assertEqual(got, expected)

+            iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
+            got = iapl.getslice(*sliceargs)
+            self.assertEqual(got, expected)
+
         testPL(5, 2, (), [0, 1, 2, 3, 4])
         testPL(5, 2, (1,), [1, 2, 3, 4])
         testPL(5, 2, (2,), [2, 3, 4])
@@ -255,16 +363,16 @@ class TestUtil(unittest.TestCase):
|
|||||||
testPL(5, 2, (20, 99), [])
|
testPL(5, 2, (20, 99), [])
|
||||||
|
|
||||||
def test_struct_unpack(self):
|
def test_struct_unpack(self):
|
||||||
self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))
|
self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
|
||||||
|
|
||||||
def test_read_batch_urls(self):
|
def test_read_batch_urls(self):
|
||||||
f = io.StringIO(u'''\xef\xbb\xbf foo
|
f = io.StringIO('''\xef\xbb\xbf foo
|
||||||
bar\r
|
bar\r
|
||||||
baz
|
baz
|
||||||
# More after this line\r
|
# More after this line\r
|
||||||
; or after this
|
; or after this
|
||||||
bam''')
|
bam''')
|
||||||
self.assertEqual(read_batch_urls(f), [u'foo', u'bar', u'baz', u'bam'])
|
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
|
||||||
|
|
||||||
def test_urlencode_postdata(self):
|
def test_urlencode_postdata(self):
|
||||||
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
|
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
|
||||||
@@ -274,15 +382,198 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
|
||||||
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
|
||||||
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
|
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
|
||||||
|
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
|
||||||
|
|
||||||
def test_strip_jsonp(self):
|
def test_strip_jsonp(self):
|
||||||
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
|
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
|
||||||
d = json.loads(stripped)
|
d = json.loads(stripped)
|
||||||
self.assertEqual(d, [{"id": "532cb", "x": 3}])
|
self.assertEqual(d, [{"id": "532cb", "x": 3}])
|
||||||
|
|
||||||
def test_uppercase_escpae(self):
|
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
|
||||||
self.assertEqual(uppercase_escape(u'aä'), u'aä')
|
d = json.loads(stripped)
|
||||||
self.assertEqual(uppercase_escape(u'\\U0001d550'), u'𝕐')
|
self.assertEqual(d, {'STATUS': 'OK'})
|
||||||
|
|
||||||
|
def test_uppercase_escape(self):
|
||||||
|
self.assertEqual(uppercase_escape('aä'), 'aä')
|
||||||
|
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
|
||||||
|
|
||||||
|
def test_limit_length(self):
|
||||||
|
self.assertEqual(limit_length(None, 12), None)
|
||||||
|
self.assertEqual(limit_length('foo', 12), 'foo')
|
||||||
|
self.assertTrue(
|
||||||
|
limit_length('foo bar baz asd', 12).startswith('foo bar'))
|
||||||
|
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
|
||||||
|
|
||||||
|
def test_escape_rfc3986(self):
|
||||||
|
reserved = "!*'();:@&=+$,/?#[]"
|
||||||
|
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
|
||||||
|
self.assertEqual(escape_rfc3986(reserved), reserved)
|
||||||
|
self.assertEqual(escape_rfc3986(unreserved), unreserved)
|
||||||
|
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
|
||||||
|
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
|
||||||
|
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
|
||||||
|
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
|
||||||
|
|
||||||
|
def test_escape_url(self):
|
||||||
|
self.assertEqual(
|
||||||
|
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
|
||||||
|
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
|
||||||
|
)
|
||||||
|
self.assertEqual(
|
||||||
|
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
|
||||||
|
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
|
||||||
|
)
|
||||||
|
self.assertEqual(
|
||||||
|
escape_url('http://тест.рф/фрагмент'),
|
||||||
|
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
|
||||||
|
)
|
||||||
|
self.assertEqual(
|
||||||
|
escape_url('http://тест.рф/абв?абв=абв#абв'),
|
||||||
|
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
|
||||||
|
)
|
||||||
|
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||||
|
|
||||||
|
def test_js_to_json_realworld(self):
|
||||||
|
inp = '''{
|
||||||
|
'clip':{'provider':'pseudo'}
|
||||||
|
}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{
|
||||||
|
"clip":{"provider":"pseudo"}
|
||||||
|
}''')
|
||||||
|
json.loads(js_to_json(inp))
|
||||||
|
|
||||||
|
inp = '''{
|
||||||
|
'playlist':[{'controls':{'all':null}}]
|
||||||
|
}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{
|
||||||
|
"playlist":[{"controls":{"all":null}}]
|
||||||
|
}''')
|
||||||
|
|
||||||
|
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
|
||||||
|
json_code = js_to_json(inp)
|
||||||
|
self.assertEqual(json.loads(json_code), json.loads(inp))
|
||||||
|
|
||||||
|
def test_js_to_json_edgecases(self):
|
||||||
|
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
||||||
|
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
||||||
|
|
||||||
|
on = js_to_json('{"abc": true}')
|
||||||
|
self.assertEqual(json.loads(on), {'abc': True})
|
||||||
|
|
||||||
|
# Ignore JavaScript code as well
|
||||||
|
on = js_to_json('''{
|
||||||
|
"x": 1,
|
||||||
|
y: "a",
|
||||||
|
z: some.code
|
||||||
|
}''')
|
||||||
|
d = json.loads(on)
|
||||||
|
self.assertEqual(d['x'], 1)
|
||||||
|
self.assertEqual(d['y'], 'a')
|
||||||
|
|
||||||
|
def test_clean_html(self):
|
||||||
|
self.assertEqual(clean_html('a:\nb'), 'a: b')
|
||||||
|
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
|
||||||
|
|
||||||
|
def test_intlist_to_bytes(self):
|
||||||
|
self.assertEqual(
|
||||||
|
intlist_to_bytes([0, 1, 127, 128, 255]),
|
||||||
|
b'\x00\x01\x7f\x80\xff')
|
||||||
|
|
||||||
|
def test_args_to_str(self):
|
||||||
|
self.assertEqual(
|
||||||
|
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
|
||||||
|
'foo ba/r -baz \'2 be\' \'\''
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_parse_filesize(self):
|
||||||
|
self.assertEqual(parse_filesize(None), None)
|
||||||
|
self.assertEqual(parse_filesize(''), None)
|
||||||
|
self.assertEqual(parse_filesize('91 B'), 91)
|
||||||
|
self.assertEqual(parse_filesize('foobar'), None)
|
||||||
|
self.assertEqual(parse_filesize('2 MiB'), 2097152)
|
||||||
|
self.assertEqual(parse_filesize('5 GB'), 5000000000)
|
||||||
|
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
|
||||||
|
self.assertEqual(parse_filesize('1,24 KB'), 1240)
|
||||||
|
|
||||||
|
def test_version_tuple(self):
|
||||||
|
self.assertEqual(version_tuple('1'), (1,))
|
||||||
|
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
|
||||||
|
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
|
||||||
|
|
||||||
|
def test_detect_exe_version(self):
|
||||||
|
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
|
||||||
|
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
|
||||||
|
configuration: --prefix=/usr --extra-'''), '1.2.1')
|
||||||
|
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
|
||||||
|
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
|
||||||
|
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
|
||||||
|
Trying to open render node...
|
||||||
|
Success at /dev/dri/renderD128.
|
||||||
|
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
||||||
|
|
||||||
|
def test_age_restricted(self):
|
||||||
|
self.assertFalse(age_restricted(None, 10)) # unrestricted content
|
||||||
|
self.assertFalse(age_restricted(1, None)) # unrestricted policy
|
||||||
|
self.assertFalse(age_restricted(8, 10))
|
||||||
|
self.assertTrue(age_restricted(18, 14))
|
||||||
|
self.assertFalse(age_restricted(18, 18))
|
||||||
|
|
||||||
|
def test_is_html(self):
|
||||||
|
self.assertFalse(is_html(b'\x49\x44\x43<html'))
|
||||||
|
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
|
||||||
|
self.assertTrue(is_html( # UTF-8 with BOM
|
||||||
|
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
|
||||||
|
self.assertTrue(is_html( # UTF-16-LE
|
||||||
|
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
|
||||||
|
))
|
||||||
|
self.assertTrue(is_html( # UTF-16-BE
|
||||||
|
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
|
||||||
|
))
|
||||||
|
self.assertTrue(is_html( # UTF-32-BE
|
||||||
|
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
|
||||||
|
self.assertTrue(is_html( # UTF-32-LE
|
||||||
|
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
|
||||||
|
|
||||||
|
def test_render_table(self):
|
||||||
|
self.assertEqual(
|
||||||
|
render_table(
|
||||||
|
['a', 'bcd'],
|
||||||
|
[[123, 4], [9999, 51]]),
|
||||||
|
'a bcd\n'
|
||||||
|
'123 4\n'
|
||||||
|
'9999 51')
|
||||||
|
|
||||||
|
def test_match_str(self):
|
||||||
|
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
|
||||||
|
self.assertFalse(match_str('xy', {'x': 1200}))
|
||||||
|
self.assertTrue(match_str('!xy', {'x': 1200}))
|
||||||
|
self.assertTrue(match_str('x', {'x': 1200}))
|
||||||
|
self.assertFalse(match_str('!x', {'x': 1200}))
|
||||||
|
self.assertTrue(match_str('x', {'x': 0}))
|
||||||
|
self.assertFalse(match_str('x>0', {'x': 0}))
|
||||||
|
self.assertFalse(match_str('x>0', {}))
|
||||||
|
self.assertTrue(match_str('x>?0', {}))
|
||||||
|
self.assertTrue(match_str('x>1K', {'x': 1200}))
|
||||||
|
self.assertFalse(match_str('x>2K', {'x': 1200}))
|
||||||
|
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
|
||||||
|
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
|
||||||
|
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 90, 'description': 'foo'}))
|
||||||
|
self.assertTrue(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 190, 'description': 'foo'}))
|
||||||
|
self.assertFalse(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
|
||||||
|
self.assertFalse(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 190, 'dislike_count': 10}))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
test/test_write_annotations.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 # coding: utf-8
+from __future__ import unicode_literals

 # Allow direct execution
 import os
@@ -31,17 +32,16 @@ params = get_params({
 })


-
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']


 class TestAnnotations(unittest.TestCase):
     def setUp(self):
         # Clear old files
         self.tearDown()

     def test_info_json(self):
         expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
@@ -71,7 +71,6 @@ class TestAnnotations(unittest.TestCase):
         # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')

-
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)

test/test_write_info_json.py (deleted file)
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Allow direct execution
-import os
-import sys
-import unittest
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from test.helper import get_params
-
-
-import io
-import json
-
-import youtube_dl.YoutubeDL
-import youtube_dl.extractor
-
-
-class YoutubeDL(youtube_dl.YoutubeDL):
-    def __init__(self, *args, **kwargs):
-        super(YoutubeDL, self).__init__(*args, **kwargs)
-        self.to_stderr = self.to_screen
-
-params = get_params({
-    'writeinfojson': True,
-    'skip_download': True,
-    'writedescription': True,
-})
-
-
-TEST_ID = 'BaW_jenozKc'
-INFO_JSON_FILE = TEST_ID + '.info.json'
-DESCRIPTION_FILE = TEST_ID + '.mp4.description'
-EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐
-test URL: https://github.com/rg3/youtube-dl/issues/1892
-
-This is a test video for youtube-dl.
-
-For more information, contact phihag@phihag.de .'''
-
-
-class TestInfoJSON(unittest.TestCase):
-    def setUp(self):
-        # Clear old files
-        self.tearDown()
-
-    def test_info_json(self):
-        ie = youtube_dl.extractor.YoutubeIE()
-        ydl = YoutubeDL(params)
-        ydl.add_info_extractor(ie)
-        ydl.download([TEST_ID])
-        self.assertTrue(os.path.exists(INFO_JSON_FILE))
-        with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
-            jd = json.load(jsonf)
-        self.assertEqual(jd['upload_date'], u'20121002')
-        self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
-        self.assertEqual(jd['id'], TEST_ID)
-        self.assertEqual(jd['extractor'], 'youtube')
-        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
-        self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
-
-        self.assertTrue(os.path.exists(DESCRIPTION_FILE))
-        with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
-            descr = descf.read()
-        self.assertEqual(descr, EXPECTED_DESCRIPTION)
-
-    def tearDown(self):
-        if os.path.exists(INFO_JSON_FILE):
-            os.remove(INFO_JSON_FILE)
-        if os.path.exists(DESCRIPTION_FILE):
-            os.remove(DESCRIPTION_FILE)
-
-if __name__ == '__main__':
-    unittest.main()
test/test_youtube_lists.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals

 # Allow direct execution
 import os
@@ -10,13 +11,8 @@ from test.helper import FakeYDL


 from youtube_dl.extractor import (
-    YoutubeUserIE,
     YoutubePlaylistIE,
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeTopListIE,
-    YoutubeSearchURLIE,
 )


@@ -25,15 +21,6 @@ class TestYoutubeLists(unittest.TestCase):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')

-    def test_youtube_playlist(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'ytdl test PL')
-        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
-        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
-
     def test_youtube_playlist_noplaylist(self):
         dl = FakeYDL()
         dl.params['noplaylist'] = True
@@ -42,35 +29,6 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertEqual(result['_type'], 'url')
         self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

-    def test_issue_673(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('PLBB231211A4F62143')
-        self.assertTrue(len(result['entries']) > 25)
-
-    def test_youtube_playlist_long(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
-        self.assertIsPlaylist(result)
-        self.assertTrue(len(result['entries']) >= 799)
-
-    def test_youtube_playlist_with_deleted(self):
-        #651
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
-        self.assertFalse('pElCt5oNDuI' in ytie_results)
-        self.assertFalse('KdPEApIVdWM' in ytie_results)
-
-    def test_youtube_playlist_empty(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')
-        self.assertIsPlaylist(result)
-        self.assertEqual(len(result['entries']), 0)
-
     def test_youtube_course(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -81,34 +39,6 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertEqual(len(entries), 25)
         self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')

-    def test_youtube_channel(self):
-        dl = FakeYDL()
-        ie = YoutubeChannelIE(dl)
-        #test paginated channel
-        result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
-        self.assertTrue(len(result['entries']) > 90)
-        #test autogenerated channel
-        result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
-        self.assertTrue(len(result['entries']) >= 18)
-
-    def test_youtube_user(self):
-        dl = FakeYDL()
-        ie = YoutubeUserIE(dl)
-        result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
-        self.assertTrue(len(result['entries']) >= 320)
-
-    def test_youtube_safe_search(self):
-        dl = FakeYDL()
-        ie = YoutubePlaylistIE(dl)
-        result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')
-        self.assertEqual(len(result['entries']), 2)
-
-    def test_youtube_show(self):
-        dl = FakeYDL()
-        ie = YoutubeShowIE(dl)
-        result = ie.extract('http://www.youtube.com/show/airdisasters')
-        self.assertTrue(len(result) >= 3)
-
     def test_youtube_mix(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -127,21 +57,5 @@ class TestYoutubeLists(unittest.TestCase):
         entries = result['entries']
         self.assertEqual(len(entries), 100)

-    def test_youtube_toplist(self):
-        dl = FakeYDL()
-        ie = YoutubeTopListIE(dl)
-        result = ie.extract('yttoplist:music:Trending')
-        entries = result['entries']
-        self.assertTrue(len(entries) >= 5)
-
-    def test_youtube_search_url(self):
-        dl = FakeYDL()
-        ie = YoutubeSearchURLIE(dl)
-        result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
-        entries = result['entries']
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'youtube-dl test video')
-        self.assertTrue(len(entries) >= 5)
-
 if __name__ == '__main__':
     unittest.main()
test/test_youtube_signature.py
@@ -1,44 +1,76 @@
 #!/usr/bin/env python

+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


 import io
 import re
 import string

+from test.helper import FakeYDL
 from youtube_dl.extractor import YoutubeIE
-from youtube_dl.utils import compat_str, compat_urlretrieve
+from youtube_dl.compat import compat_str, compat_urlretrieve

 _TESTS = [
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
-        u'js',
+        'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
+        'js',
         86,
-        u'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
+        '>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
-        u'js',
+        'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
+        'js',
         85,
-        u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
+        '3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
-        u'js',
+        'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
+        'js',
         90,
-        u']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
+        ']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
-        u'js',
-        u'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
-        u'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
+        'js',
+        84,
+        'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
     ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
+        'js',
+        '2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
+        'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
+        'js',
+        84,
+        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
+        'js',
+        83,
+        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
+        'js',
+        '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
+        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
+        'js',
+        '312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
+        '112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
+    )
 ]


@@ -51,18 +83,19 @@ class TestSignature(unittest.TestCase):


 def make_tfunc(url, stype, sig_input, expected_sig):
-    basename = url.rpartition('/')[2]
-    m = re.match(r'.*-([a-zA-Z0-9_-]+)\.[a-z]+$', basename)
-    assert m, '%r should follow URL format' % basename
+    m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
+    assert m, '%r should follow URL format' % url
     test_id = m.group(1)

     def test_func(self):
+        basename = 'player-%s.%s' % (test_id, stype)
         fn = os.path.join(self.TESTDATA_DIR, basename)

         if not os.path.exists(fn):
             compat_urlretrieve(url, fn)

-        ie = YoutubeIE()
+        ydl = FakeYDL()
+        ie = YoutubeIE(ydl)
         if stype == 'js':
             with io.open(fn, encoding='utf-8') as testf:
                 jscode = testf.read()
test/testcert.pem (new file, 52 lines)
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDMF0bAzaHAdIyB
+HRmnIp4vv40lGqEePmWqicCl0QZ0wsb5dNysSxSa7330M2QeQopGfdaUYF1uTcNp
+Qx6ECgBSfg+RrOBI7r/u4F+sKX8MUXVaf/5QoBUrGNGSn/pp7HMGOuQqO6BVg4+h
+A1ySSwUG8mZItLRry1ISyErmW8b9xlqfd97uLME/5tX+sMelRFjUbAx8A4CK58Ev
+mMguHVTlXzx5RMdYcf1VScYcjlV/qA45uzP8zwI5aigfcmUD+tbGuQRhKxUhmw0J
+aobtOR6+JSOAULW5gYa/egE4dWLwbyM6b6eFbdnjlQzEA1EW7ChMPAW/Mo83KyiP
+tKMCSQulAgMBAAECggEALCfBDAexPjU5DNoh6bIorUXxIJzxTNzNHCdvgbCGiA54
+BBKPh8s6qwazpnjT6WQWDIg/O5zZufqjE4wM9x4+0Zoqfib742ucJO9wY4way6x4
+Clt0xzbLPabB+MoZ4H7ip+9n2+dImhe7pGdYyOHoNYeOL57BBi1YFW42Hj6u/8pd
+63YCXisto3Rz1YvRQVjwsrS+cRKZlzAFQRviL30jav7Wh1aWEfcXxjj4zhm8pJdk
+ITGtq6howz57M0NtX6hZnfe8ywzTnDFIGKIMA2cYHuYJcBh9bc4tCGubTvTKK9UE
+8fM+f6UbfGqfpKCq1mcgs0XMoFDSzKS9+mSJn0+5JQKBgQD+OCKaeH3Yzw5zGnlw
+XuQfMJGNcgNr+ImjmvzUAC2fAZUJLAcQueE5kzMv5Fmd+EFE2CEX1Vit3tg0SXvA
+G+bq609doILHMA03JHnV1npO/YNIhG3AAtJlKYGxQNfWH9mflYj9mEui8ZFxG52o
+zWhHYuifOjjZszUR+/eio6NPzwKBgQDNhUBTrT8LIX4SE/EFUiTlYmWIvOMgXYvN
+8Cm3IRNQ/yyphZaXEU0eJzfX5uCDfSVOgd6YM/2pRah+t+1Hvey4H8e0GVTu5wMP
+gkkqwKPGIR1YOmlw6ippqwvoJD7LuYrm6Q4D6e1PvkjwCq6lEndrOPmPrrXNd0JJ
+XO60y3U2SwKBgQDLkyZarryQXxcCI6Q10Tc6pskYDMIit095PUbTeiUOXNT9GE28
+Hi32ziLCakk9kCysNasii81MxtQ54tJ/f5iGbNMMddnkKl2a19Hc5LjjAm4cJzg/
+98KGEhvyVqvAo5bBDZ06/rcrD+lZOzUglQS5jcIcqCIYa0LHWQ/wJLxFzwKBgFcZ
+1SRhdSmDfUmuF+S4ZpistflYjC3IV5rk4NkS9HvMWaJS0nqdw4A3AMzItXgkjq4S
+DkOVLTkTI5Do5HAWRv/VwC5M2hkR4NMu1VGAKSisGiKtRsirBWSZMEenLNHshbjN
+Jrpz5rZ4H7NT46ZkCCZyFBpX4gb9NyOedjA7Via3AoGARF8RxbYjnEGGFuhnbrJB
+FTPR0vaL4faY3lOgRZ8jOG9V2c9Hzi/y8a8TU4C11jnJSDqYCXBTd5XN28npYxtD
+pjRsCwy6ze+yvYXPO7C978eMG3YRyj366NXUxnXN59ibwe/lxi2OD9z8J1LEdF6z
+VJua1Wn8HKxnXMI61DhTCSo=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIEEzCCAvugAwIBAgIJAK1haYi6gmSKMA0GCSqGSIb3DQEBCwUAMIGeMQswCQYD
+VQQGEwJERTEMMAoGA1UECAwDTlJXMRQwEgYDVQQHDAtEdWVzc2VsZG9yZjEbMBkG
+A1UECgwSeW91dHViZS1kbCBwcm9qZWN0MRkwFwYDVQQLDBB5b3V0dWJlLWRsIHRl
+c3RzMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEHBoaWhhZ0Bw
+aGloYWcuZGUwIBcNMTUwMTMwMDExNTA4WhgPMjExNTAxMDYwMTE1MDhaMIGeMQsw
+CQYDVQQGEwJERTEMMAoGA1UECAwDTlJXMRQwEgYDVQQHDAtEdWVzc2VsZG9yZjEb
+MBkGA1UECgwSeW91dHViZS1kbCBwcm9qZWN0MRkwFwYDVQQLDBB5b3V0dWJlLWRs
+IHRlc3RzMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEHBoaWhh
+Z0BwaGloYWcuZGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDMF0bA
+zaHAdIyBHRmnIp4vv40lGqEePmWqicCl0QZ0wsb5dNysSxSa7330M2QeQopGfdaU
+YF1uTcNpQx6ECgBSfg+RrOBI7r/u4F+sKX8MUXVaf/5QoBUrGNGSn/pp7HMGOuQq
+O6BVg4+hA1ySSwUG8mZItLRry1ISyErmW8b9xlqfd97uLME/5tX+sMelRFjUbAx8
+A4CK58EvmMguHVTlXzx5RMdYcf1VScYcjlV/qA45uzP8zwI5aigfcmUD+tbGuQRh
+KxUhmw0JaobtOR6+JSOAULW5gYa/egE4dWLwbyM6b6eFbdnjlQzEA1EW7ChMPAW/
+Mo83KyiPtKMCSQulAgMBAAGjUDBOMB0GA1UdDgQWBBTBUZoqhQkzHQ6xNgZfFxOd
+ZEVt8TAfBgNVHSMEGDAWgBTBUZoqhQkzHQ6xNgZfFxOdZEVt8TAMBgNVHRMEBTAD
+AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCUOCl3T/J9B08Z+ijfOJAtkbUaEHuVZb4x
+5EpZSy2ZbkLvtsftMFieHVNXn9dDswQc5qjYStCC4o60LKw4M6Y63FRsAZ/DNaqb
+PY3jyCyuugZ8/sNf50vHYkAcF7SQYqOQFQX4TQsNUk2xMJIt7H0ErQFmkf/u3dg6
+cy89zkT462IwxzSG7NNhIlRkL9o5qg+Y1mF9eZA1B0rcL6hO24PPTHOd90HDChBu
+SZ6XMi/LzYQSTf0Vg2R+uMIVlzSlkdcZ6sqVnnqeLL8dFyIa4e9sj/D4ZCYP8Mqe
+Z73H5/NNhmwCHRqVUTgm307xblQaWGhwAiDkaRvRW2aJQ0qGEdZK
+-----END CERTIFICATE-----
tox.ini
@@ -1,8 +1,11 @@
 [tox]
-envlist = py26,py27,py33
+envlist = py26,py27,py33,py34
 [testenv]
 deps =
    nose
    coverage
-commands = nosetests --verbose {posargs:test}  # --with-coverage --cover-package=youtube_dl --cover-html
+defaultargs = test --exclude test_download.py --exclude test_age_restriction.py
+    --exclude test_subtitles.py --exclude test_write_annotations.py
+    --exclude test_youtube_lists.py
+commands = nosetests --verbose {posargs:{[testenv]defaultargs}}  # --with-coverage --cover-package=youtube_dl --cover-html
     # test.test_download:TestDownload.test_NowVideo
youtube-dl (deleted file, 89 lines)
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-import json, hashlib
-
-try:
-    import urllib.request as compat_urllib_request
-except ImportError: # Python 2
-    import urllib2 as compat_urllib_request
-
-def rsa_verify(message, signature, key):
-    from struct import pack
-    from hashlib import sha256
-    from sys import version_info
-    def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
-    block_size = 0
-    n = key[0]
-    while n:
-        block_size += 1
-        n >>= 8
-    signature = pow(int(signature, 16), key[1], key[0])
-    raw_bytes = []
-    while signature:
-        raw_bytes.insert(0, pack("B", signature & 0xFF))
-        signature >>= 8
-    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
-    signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
-    signature = signature[19:]
-    if signature != sha256(message).digest(): return False
-    return True
-
-sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
-sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
-sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
-
-try:
-    raw_input()
-except NameError: # Python 3
-    input()
-
-filename = sys.argv[0]
-
-UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
-VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
-JSON_URL = UPDATE_URL + 'versions.json'
-UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
-
-if not os.access(filename, os.W_OK):
-    sys.exit('ERROR: no write permissions on %s' % filename)
-
-try:
-    versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
-    versions_info = json.loads(versions_info)
-except:
-    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
-if not 'signature' in versions_info:
-    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
-signature = versions_info['signature']
-del versions_info['signature']
-if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
-    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
-
-version = versions_info['versions'][versions_info['latest']]
-
-try:
-    urlh = compat_urllib_request.urlopen(version['bin'][0])
-    newcontent = urlh.read()
-    urlh.close()
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to download latest version')
-
-newcontent_hash = hashlib.sha256(newcontent).hexdigest()
-if newcontent_hash != version['bin'][1]:
-    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
-
-try:
-    with open(filename, 'wb') as outf:
-        outf.write(newcontent)
-except (IOError, OSError) as err:
-    sys.exit('ERROR: unable to overwrite current version')
-
-sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
youtube-dl.exe (binary file; diff not shown)
youtube_dl/FileDownloader.py (deleted file)
@@ -1,12 +0,0 @@
-# Legacy file for backwards compatibility, use youtube_dl.downloader instead!
-from .downloader import FileDownloader as RealFileDownloader
-from .downloader import get_suitable_downloader
-
-
-# This class reproduces the old behaviour of FileDownloader
-class FileDownloader(RealFileDownloader):
-    def _do_download(self, filename, info_dict):
-        real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params)
-        for ph in self._progress_hooks:
-            real_fd.add_progress_hook(ph)
-        return real_fd.download(filename, info_dict)
(one file diff suppressed because it is too large)
youtube_dl/__init__.py
@@ -1,92 +1,33 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-

-__authors__ = (
-    'Ricardo Garcia Gonzalez',
-    'Danny Colligan',
-    'Benjamin Johnson',
-    'Vasyl\' Vavrychuk',
-    'Witold Baryluk',
-    'Paweł Paprota',
-    'Gergely Imreh',
-    'Rogério Brito',
-    'Philipp Hagemeister',
-    'Sören Schulze',
-    'Kevin Ngo',
-    'Ori Avtalion',
-    'shizeeg',
-    'Filippo Valsorda',
-    'Christian Albrecht',
-    'Dave Vasilevsky',
-    'Jaime Marquínez Ferrándiz',
-    'Jeff Crouse',
-    'Osama Khalid',
-    'Michael Walter',
-    'M. Yasoob Ullah Khalid',
-    'Julien Fraichard',
-    'Johny Mo Swag',
-    'Axel Noack',
-    'Albert Kim',
-    'Pierre Rudloff',
-    'Huarong Huo',
-    'Ismael Mejía',
-    'Steffan \'Ruirize\' James',
-    'Andras Elso',
-    'Jelle van der Waa',
-    'Marcin Cieślak',
-    'Anton Larionov',
-    'Takuya Tsuchida',
-    'Sergey M.',
-    'Michael Orlitzky',
-    'Chris Gahan',
-    'Saimadhav Heblikar',
-    'Mike Col',
-    'Oleg Prutz',
-    'pulpe',
-    'Andreas Schmitz',
-    'Michael Kaiser',
-    'Niklas Laxström',
-    'David Triendl',
-    'Anthony Weems',
-    'David Wagner',
-    'Juan C. Olivares',
-    'Mattias Harrysson',
-    'phaer',
-    'Sainyam Kapoor',
-    'Nicolas Évrard',
-    'Jason Normore',
-    'Hoje Lee',
-    'Adam Thalhammer',
-    'Georg Jähnig',
-    'Ralf Haring',
-    'Koki Takahashi',
-    'Ariset Llerena',
-    'Adam Malcontenti-Wilson',
-    'Tobias Bell',
-)
+from __future__ import unicode_literals

 __license__ = 'Public Domain'

 import codecs
 import io
-import locale
-import optparse
 import os
 import random
-import re
 import shlex
 import sys


-from .utils import (
+from .options import (
+    parseOpts,
+)
+from .compat import (
+    compat_expanduser,
     compat_getpass,
     compat_print,
+    workaround_optparse_bug9161,
+)
+from .utils import (
     DateRange,
-    DEFAULT_OUTTMPL,
     decodeOption,
-    get_term_width,
+    DEFAULT_OUTTMPL,
     DownloadError,
-    get_cachedir,
+    match_filter_func,
     MaxDownloadsReached,
     preferredencoding,
     read_batch_urls,
@@ -96,472 +37,11 @@ from .utils import (
     write_string,
 )
 from .update import update_self
-from .FileDownloader import (
+from .downloader import (
     FileDownloader,
 )
-from .extractor import gen_extractors
+from .extractor import gen_extractors, list_extractors
-from .version import __version__
 from .YoutubeDL import YoutubeDL
-from .postprocessor import (
-    AtomicParsleyPP,
-    FFmpegAudioFixPP,
-    FFmpegMetadataPP,
-    FFmpegVideoConvertor,
-    FFmpegExtractAudioPP,
-    FFmpegEmbedSubtitlePP,
-    XAttrMetadataPP,
-)
-
-
-def parseOpts(overrideArguments=None):
-    def _readOptions(filename_bytes, default=[]):
-        try:
-            optionf = open(filename_bytes)
-        except IOError:
-            return default  # silently skip if file is not present
-        try:
-            res = []
-            for l in optionf:
-                res += shlex.split(l, comments=True)
-        finally:
-            optionf.close()
-        return res
-
-    def _readUserConf():
-        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
-        if xdg_config_home:
-            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
-            if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
-        else:
-            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
-            if not os.path.isfile(userConfFile):
-                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
-        userConf = _readOptions(userConfFile, None)
-
-        if userConf is None:
-            appdata_dir = os.environ.get('appdata')
-            if appdata_dir:
-                userConf = _readOptions(
-                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
-                    default=None)
-                if userConf is None:
-                    userConf = _readOptions(
-                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
-                        default=None)
-
-        if userConf is None:
-            userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
-                default=None)
-        if userConf is None:
-            userConf = _readOptions(
-                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
-                default=None)
-
-        if userConf is None:
-            userConf = []
-
-        return userConf
-
-    def _format_option_string(option):
-        ''' ('-o', '--option') -> -o, --format METAVAR'''
-
-        opts = []
-
-        if option._short_opts:
-            opts.append(option._short_opts[0])
-        if option._long_opts:
-            opts.append(option._long_opts[0])
-        if len(opts) > 1:
-            opts.insert(1, ', ')
-
-        if option.takes_value(): opts.append(' %s' % option.metavar)
-
-        return "".join(opts)
-
-    def _comma_separated_values_options_callback(option, opt_str, value, parser):
-        setattr(parser.values, option.dest, value.split(','))
-
-    def _hide_login_info(opts):
-        opts = list(opts)
-        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
-            try:
-                i = opts.index(private_opt)
-                opts[i+1] = '<PRIVATE>'
-            except ValueError:
-                pass
-        return opts
-
-    max_width = 80
-    max_help_position = 80
-
-    # No need to wrap help messages if we're on a wide console
-    columns = get_term_width()
-    if columns: max_width = columns
-
-    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
-    fmt.format_option_strings = _format_option_string
-
-    kw = {
-        'version' : __version__,
-        'formatter' : fmt,
-        'usage' : '%prog [options] url [url...]',
-        'conflict_handler' : 'resolve',
-    }
-
-    parser = optparse.OptionParser(**kw)
-
-    # option groups
-    general = optparse.OptionGroup(parser, 'General Options')
-    selection = optparse.OptionGroup(parser, 'Video Selection')
-    authentication = optparse.OptionGroup(parser, 'Authentication Options')
-    video_format = optparse.OptionGroup(parser, 'Video Format Options')
-    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
-    downloader = optparse.OptionGroup(parser, 'Download Options')
-    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
-    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
-    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-
-    general.add_option('-h', '--help',
-            action='help', help='print this help text and exit')
-    general.add_option('-v', '--version',
-            action='version', help='print program version and exit')
-    general.add_option('-U', '--update',
-            action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
-    general.add_option('-i', '--ignore-errors',
-            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
-    general.add_option('--abort-on-error',
-            action='store_false', dest='ignoreerrors',
-            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
-    general.add_option('--dump-user-agent',
-            action='store_true', dest='dump_user_agent',
-            help='display the current browser identification', default=False)
-    general.add_option('--user-agent',
-            dest='user_agent', help='specify a custom user agent', metavar='UA')
-    general.add_option('--referer',
-            dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
-            metavar='REF', default=None)
-    general.add_option('--add-header',
-            dest='headers', help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times', action="append",
-            metavar='FIELD:VALUE')
-    general.add_option('--list-extractors',
-            action='store_true', dest='list_extractors',
-            help='List all supported extractors and the URLs they would handle', default=False)
-    general.add_option('--extractor-descriptions',
-            action='store_true', dest='list_extractor_descriptions',
-            help='Output descriptions of all supported extractors', default=False)
-    general.add_option(
-        '--proxy', dest='proxy', default=None, metavar='URL',
-        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
-    general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
-    general.add_option(
-        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
-        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
-    general.add_option(
-        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
-        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
-    general.add_option(
-        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
-        help='Disable filesystem caching')
-    general.add_option(
-        '--socket-timeout', dest='socket_timeout',
-        type=float, default=None, help=u'Time to wait before giving up, in seconds')
-    general.add_option(
-        '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
-    general.add_option(
-        '--default-search',
-        dest='default_search', metavar='PREFIX',
-        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess. The default value "error" just throws an error.')
-    general.add_option(
-        '--ignore-config',
-        action='store_true',
-        help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
-    general.add_option(
-        '--encoding', dest='encoding', metavar='ENCODING',
-        help='Force the specified encoding (experimental)')
-
-    selection.add_option(
-        '--playlist-start',
-        dest='playliststart', metavar='NUMBER', default=1, type=int,
-        help='playlist video to start at (default is %default)')
-    selection.add_option(
-        '--playlist-end',
-        dest='playlistend', metavar='NUMBER', default=None, type=int,
-        help='playlist video to end at (default is last)')
-    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
-    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
-    selection.add_option('--max-downloads', metavar='NUMBER',
-            dest='max_downloads', type=int, default=None,
-            help='Abort after downloading NUMBER files')
-    selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
-    selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
-    selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
-    selection.add_option(
-        '--datebefore', metavar='DATE', dest='datebefore', default=None,
-        help='download only videos uploaded on or before this date (i.e. inclusive)')
-    selection.add_option(
-        '--dateafter', metavar='DATE', dest='dateafter', default=None,
-        help='download only videos uploaded on or after this date (i.e. inclusive)')
-    selection.add_option(
-        '--min-views', metavar='COUNT', dest='min_views',
-        default=None, type=int,
-        help="Do not download any videos with less than COUNT views",)
-    selection.add_option(
-        '--max-views', metavar='COUNT', dest='max_views',
-        default=None, type=int,
-        help="Do not download any videos with more than COUNT views",)
-    selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
-    selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
-            help='download only videos suitable for the given age',
-            default=None, type=int)
-    selection.add_option('--download-archive', metavar='FILE',
-            dest='download_archive',
-            help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
-    selection.add_option(
-        '--include-ads', dest='include_ads',
-        action='store_true',
-        help='Download advertisements as well (experimental)')
-    selection.add_option(
-        '--youtube-include-dash-manifest', action='store_true',
-        dest='youtube_include_dash_manifest', default=False,
-        help='Try to download the DASH manifest on YouTube videos (experimental)')
-
-    authentication.add_option('-u', '--username',
-            dest='username', metavar='USERNAME', help='account username')
-    authentication.add_option('-p', '--password',
-            dest='password', metavar='PASSWORD', help='account password')
-    authentication.add_option('-n', '--netrc',
-            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
-    authentication.add_option('--video-password',
-            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
-
-
-    video_format.add_option('-f', '--format',
-            action='store', dest='format', metavar='FORMAT', default=None,
-            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
-    video_format.add_option('--all-formats',
-            action='store_const', dest='format', help='download all available video formats', const='all')
-    video_format.add_option('--prefer-free-formats',
-            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
-    video_format.add_option('--max-quality',
-            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
-    video_format.add_option('-F', '--list-formats',
-            action='store_true', dest='listformats', help='list all available formats')
-
-    subtitles.add_option('--write-sub', '--write-srt',
-            action='store_true', dest='writesubtitles',
-            help='write subtitle file', default=False)
-    subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
-            action='store_true', dest='writeautomaticsub',
-            help='write automatic subtitle file (youtube only)', default=False)
-    subtitles.add_option('--all-subs',
-            action='store_true', dest='allsubtitles',
-            help='downloads all the available subtitles of the video', default=False)
-    subtitles.add_option('--list-subs',
-            action='store_true', dest='listsubtitles',
-            help='lists all available subtitles for the video', default=False)
-    subtitles.add_option('--sub-format',
-            action='store', dest='subtitlesformat', metavar='FORMAT',
-            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
-    subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
-            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
-            default=[], callback=_comma_separated_values_options_callback,
-            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
-
-    downloader.add_option('-r', '--rate-limit',
-            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
-    downloader.add_option('-R', '--retries',
-            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
-    downloader.add_option('--buffer-size',
-            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
-    downloader.add_option('--no-resize-buffer',
-            action='store_true', dest='noresizebuffer',
-            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
-    downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
-
-    verbosity.add_option('-q', '--quiet',
-            action='store_true', dest='quiet', help='activates quiet mode', default=False)
-    verbosity.add_option(
-        '--no-warnings',
-        dest='no_warnings', action='store_true', default=False,
-        help='Ignore warnings')
-    verbosity.add_option('-s', '--simulate',
-            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
-    verbosity.add_option('--skip-download',
-            action='store_true', dest='skip_download', help='do not download the video', default=False)
-    verbosity.add_option('-g', '--get-url',
-            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
-    verbosity.add_option('-e', '--get-title',
-            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
-    verbosity.add_option('--get-id',
-            action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
-    verbosity.add_option('--get-thumbnail',
-            action='store_true', dest='getthumbnail',
-            help='simulate, quiet but print thumbnail URL', default=False)
-    verbosity.add_option('--get-description',
-            action='store_true', dest='getdescription',
-            help='simulate, quiet but print video description', default=False)
-    verbosity.add_option('--get-duration',
-            action='store_true', dest='getduration',
-            help='simulate, quiet but print video length', default=False)
-    verbosity.add_option('--get-filename',
-            action='store_true', dest='getfilename',
-            help='simulate, quiet but print output filename', default=False)
-    verbosity.add_option('--get-format',
-            action='store_true', dest='getformat',
-            help='simulate, quiet but print output format', default=False)
-    verbosity.add_option('-j', '--dump-json',
-            action='store_true', dest='dumpjson',
-            help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
-    verbosity.add_option('--newline',
-            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
-    verbosity.add_option('--no-progress',
-            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
-    verbosity.add_option('--console-title',
-            action='store_true', dest='consoletitle',
-            help='display progress in console titlebar', default=False)
verbosity.add_option('-v', '--verbose',
|
|
||||||
action='store_true', dest='verbose', help='print various debugging information', default=False)
|
|
||||||
verbosity.add_option('--dump-intermediate-pages',
|
|
||||||
action='store_true', dest='dump_intermediate_pages', default=False,
|
|
||||||
help='print downloaded pages to debug problems (very verbose)')
|
|
||||||
verbosity.add_option('--write-pages',
|
|
||||||
action='store_true', dest='write_pages', default=False,
|
|
||||||
help='Write downloaded intermediary pages to files in the current directory to debug problems')
|
|
||||||
verbosity.add_option('--youtube-print-sig-code',
|
|
||||||
action='store_true', dest='youtube_print_sig_code', default=False,
|
|
||||||
help=optparse.SUPPRESS_HELP)
|
|
||||||
verbosity.add_option('--print-traffic',
|
|
||||||
dest='debug_printtraffic', action='store_true', default=False,
|
|
||||||
help='Display sent and read HTTP traffic')
|
|
||||||
|
|
||||||
|
|
||||||
filesystem.add_option('-t', '--title',
|
|
||||||
action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
|
|
||||||
filesystem.add_option('--id',
|
|
||||||
action='store_true', dest='useid', help='use only video ID in file name', default=False)
|
|
||||||
filesystem.add_option('-l', '--literal',
|
|
||||||
action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
|
|
||||||
filesystem.add_option('-A', '--auto-number',
|
|
||||||
action='store_true', dest='autonumber',
|
|
||||||
help='number downloaded files starting from 00000', default=False)
|
|
||||||
filesystem.add_option('-o', '--output',
|
|
||||||
dest='outtmpl', metavar='TEMPLATE',
|
|
||||||
help=('output filename template. Use %(title)s to get the title, '
|
|
||||||
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
|
|
||||||
'%(autonumber)s to get an automatically incremented number, '
|
|
||||||
'%(ext)s for the filename extension, '
|
|
||||||
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
|
|
||||||
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
|
|
||||||
'%(upload_date)s for the upload date (YYYYMMDD), '
|
|
||||||
'%(extractor)s for the provider (youtube, metacafe, etc), '
|
|
||||||
'%(id)s for the video id, %(playlist)s for the playlist the video is in, '
|
|
||||||
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
|
|
||||||
'%(height)s and %(width)s for the width and height of the video format. '
|
|
||||||
'%(resolution)s for a textual description of the resolution of the video format. '
|
|
||||||
'Use - to output to stdout. Can also be used to download to a different directory, '
|
|
||||||
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
|
|
||||||
filesystem.add_option('--autonumber-size',
|
|
||||||
dest='autonumber_size', metavar='NUMBER',
|
|
||||||
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
|
|
||||||
filesystem.add_option('--restrict-filenames',
|
|
||||||
action='store_true', dest='restrictfilenames',
|
|
||||||
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
|
|
||||||
filesystem.add_option('-a', '--batch-file',
|
|
||||||
dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
|
|
||||||
filesystem.add_option('--load-info',
|
|
||||||
dest='load_info_filename', metavar='FILE',
|
|
||||||
help='json file containing the video information (created with the "--write-json" option)')
|
|
||||||
filesystem.add_option('-w', '--no-overwrites',
|
|
||||||
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
|
|
||||||
filesystem.add_option('-c', '--continue',
|
|
||||||
action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
|
|
||||||
filesystem.add_option('--no-continue',
|
|
||||||
action='store_false', dest='continue_dl',
|
|
||||||
help='do not resume partially downloaded files (restart from beginning)')
|
|
||||||
filesystem.add_option('--cookies',
|
|
||||||
dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
|
|
||||||
filesystem.add_option('--no-part',
|
|
||||||
action='store_true', dest='nopart', help='do not use .part files', default=False)
|
|
||||||
filesystem.add_option('--no-mtime',
|
|
||||||
action='store_false', dest='updatetime',
|
|
||||||
help='do not use the Last-modified header to set the file modification time', default=True)
|
|
||||||
filesystem.add_option('--write-description',
|
|
||||||
action='store_true', dest='writedescription',
|
|
||||||
help='write video description to a .description file', default=False)
|
|
||||||
filesystem.add_option('--write-info-json',
|
|
||||||
action='store_true', dest='writeinfojson',
|
|
||||||
help='write video metadata to a .info.json file', default=False)
|
|
||||||
filesystem.add_option('--write-annotations',
|
|
||||||
action='store_true', dest='writeannotations',
|
|
||||||
help='write video annotations to a .annotation file', default=False)
|
|
||||||
filesystem.add_option('--write-thumbnail',
|
|
||||||
action='store_true', dest='writethumbnail',
|
|
||||||
help='write thumbnail image to disk', default=False)
|
|
||||||
|
|
||||||
|
|
||||||
postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
|
|
||||||
help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
|
|
||||||
postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
|
|
||||||
help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
|
|
||||||
postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
|
|
||||||
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
|
|
||||||
postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
|
|
||||||
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
|
|
||||||
postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
|
|
||||||
help='keeps the video file on disk after the post-processing; the video is erased by default')
|
|
||||||
postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
|
|
||||||
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
|
|
||||||
postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
|
|
||||||
help='embed subtitles in the video (only for mp4 videos)')
|
|
||||||
postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
|
|
||||||
help='embed thumbnail in the audio as cover art')
|
|
||||||
postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
|
|
||||||
help='write metadata to the video file')
|
|
||||||
postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
|
|
||||||
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
|
|
||||||
postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
|
|
||||||
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
|
|
||||||
postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
|
|
||||||
help='Prefer ffmpeg over avconv for running the postprocessors')
|
|
||||||
|
|
||||||
|
|
||||||
parser.add_option_group(general)
|
|
||||||
parser.add_option_group(selection)
|
|
||||||
parser.add_option_group(downloader)
|
|
||||||
parser.add_option_group(filesystem)
|
|
||||||
parser.add_option_group(verbosity)
|
|
||||||
parser.add_option_group(video_format)
|
|
||||||
parser.add_option_group(subtitles)
|
|
||||||
parser.add_option_group(authentication)
|
|
||||||
parser.add_option_group(postproc)
|
|
||||||
|
|
||||||
if overrideArguments is not None:
|
|
||||||
opts, args = parser.parse_args(overrideArguments)
|
|
||||||
if opts.verbose:
|
|
||||||
write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
|
|
||||||
else:
|
|
||||||
commandLineConf = sys.argv[1:]
|
|
||||||
if '--ignore-config' in commandLineConf:
|
|
||||||
systemConf = []
|
|
||||||
userConf = []
|
|
||||||
else:
|
|
||||||
systemConf = _readOptions('/etc/youtube-dl.conf')
|
|
||||||
if '--ignore-config' in systemConf:
|
|
||||||
userConf = []
|
|
||||||
else:
|
|
||||||
userConf = _readUserConf()
|
|
||||||
argv = systemConf + userConf + commandLineConf
|
|
||||||
|
|
||||||
opts, args = parser.parse_args(argv)
|
|
||||||
if opts.verbose:
|
|
||||||
write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
|
|
||||||
write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
|
|
||||||
write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
|
|
||||||
|
|
||||||
return parser, opts, args
|
|
||||||
|
|
||||||
|
|
||||||
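The precedence in the deleted parseOpts above comes straight from optparse: when the same option appears several times in one argv list, the last occurrence wins, so concatenating systemConf + userConf + commandLineConf lets the command line override both config files. A minimal, self-contained sketch of that behaviour (a toy parser, not youtube-dl's real option set):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('-r', '--rate-limit', dest='ratelimit')

    # Stand-ins for /etc/youtube-dl.conf, the user config and sys.argv[1:]:
    system_conf = ['--rate-limit', '50K']
    user_conf = ['--rate-limit', '200K']
    command_line = ['--rate-limit', '4.2M']

    opts, args = parser.parse_args(system_conf + user_conf + command_line)
    assert opts.ratelimit == '4.2M'  # the last occurrence wins
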
@@ -570,7 +50,9 @@ def _real_main(argv=None):
     # https://github.com/rg3/youtube-dl/issues/820
     codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
 
-    setproctitle(u'youtube-dl')
+    workaround_optparse_bug9161()
 
+    setproctitle('youtube-dl')
 
     parser, opts, args = parseOpts(argv)
 
@@ -586,10 +68,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value
 
     # Dump user agent
@@ -607,94 +89,96 @@ def _real_main(argv=None):
             batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
             batch_urls = read_batch_urls(batchfd)
             if opts.verbose:
-                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
+                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
         except IOError:
-            sys.exit(u'ERROR: batch file could not be read')
+            sys.exit('ERROR: batch file could not be read')
     all_urls = batch_urls + args
     all_urls = [url.strip() for url in all_urls]
     _enc = preferredencoding()
     all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
 
-    extractors = gen_extractors()
 
     if opts.list_extractors:
-        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
+        for ie in list_extractors(opts.age_limit):
             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
             matchedUrls = [url for url in all_urls if ie.suitable(url)]
             for mu in matchedUrls:
-                compat_print(u' ' + mu)
+                compat_print(' ' + mu)
         sys.exit(0)
     if opts.list_extractor_descriptions:
-        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
+        for ie in list_extractors(opts.age_limit):
             if not ie._WORKING:
                 continue
             desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
             if desc is False:
                 continue
            if hasattr(ie, 'SEARCH_KEY'):
-                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise')
-                _COUNTS = (u'', u'5', u'10', u'all')
-                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
+                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
+                _COUNTS = ('', '5', '10', 'all')
+                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
             compat_print(desc)
         sys.exit(0)
 
 
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
-        parser.error(u'using .netrc conflicts with giving username/password')
+        parser.error('using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
-        parser.error(u'account username missing\n')
+        parser.error('account username missing\n')
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
-        parser.error(u'using output template conflicts with using title, video ID or auto number')
+        parser.error('using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
-        parser.error(u'using title conflicts with using video ID')
+        parser.error('using title conflicts with using video ID')
     if opts.username is not None and opts.password is None:
-        opts.password = compat_getpass(u'Type account password and press [Return]: ')
+        opts.password = compat_getpass('Type account password and press [Return]: ')
     if opts.ratelimit is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
         if numeric_limit is None:
-            parser.error(u'invalid rate limit specified')
+            parser.error('invalid rate limit specified')
         opts.ratelimit = numeric_limit
     if opts.min_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid min_filesize specified')
+            parser.error('invalid min_filesize specified')
         opts.min_filesize = numeric_limit
     if opts.max_filesize is not None:
         numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
         if numeric_limit is None:
-            parser.error(u'invalid max_filesize specified')
+            parser.error('invalid max_filesize specified')
         opts.max_filesize = numeric_limit
     if opts.retries is not None:
-        try:
-            opts.retries = int(opts.retries)
-        except (TypeError, ValueError):
-            parser.error(u'invalid retry count specified')
+        if opts.retries in ('inf', 'infinite'):
+            opts_retries = float('inf')
+        else:
+            try:
+                opts_retries = int(opts.retries)
+            except (TypeError, ValueError):
+                parser.error('invalid retry count specified')
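float('inf') is what makes the new '--retries inf' spelling work downstream: the downloader compares its attempt counter against opts_retries, and every int is smaller than infinity. A standalone sketch of the parsing above:

    def parse_retries(retries):
        # Mirrors the branch above: 'inf'/'infinite' mean unbounded retries.
        if retries in ('inf', 'infinite'):
            return float('inf')
        try:
            return int(retries)
        except (TypeError, ValueError):
            raise ValueError('invalid retry count specified')

    assert parse_retries('10') == 10
    assert 10 ** 9 < parse_retries('inf')  # any attempt count stays below it
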
     if opts.buffersize is not None:
         numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
         if numeric_buffersize is None:
-            parser.error(u'invalid buffer size specified')
+            parser.error('invalid buffer size specified')
         opts.buffersize = numeric_buffersize
     if opts.playliststart <= 0:
-        raise ValueError(u'Playlist start must be positive')
+        raise ValueError('Playlist start must be positive')
     if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
-        raise ValueError(u'Playlist end must be greater than playlist start')
+        raise ValueError('Playlist end must be greater than playlist start')
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
-            parser.error(u'invalid audio format specified')
+            parser.error('invalid audio format specified')
     if opts.audioquality:
         opts.audioquality = opts.audioquality.strip('k').strip('K')
         if not opts.audioquality.isdigit():
-            parser.error(u'invalid audio quality specified')
+            parser.error('invalid audio quality specified')
     if opts.recodevideo is not None:
         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
-            parser.error(u'invalid video recode format specified')
+            parser.error('invalid video recode format specified')
+    if opts.convertsubtitles is not None:
+        if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
+            parser.error('invalid subtitle format specified')
 
     if opts.date is not None:
         date = DateRange.day(opts.date)
     else:
         date = DateRange(opts.dateafter, opts.datebefore)
-    if opts.default_search not in ('auto', 'auto_warning', None) and ':' not in opts.default_search:
-        parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
 
     # Do not download videos when there are audio-only formats
     if opts.extractaudio and not opts.keepvideo and opts.format is None:
@@ -702,35 +186,95 @@ def _real_main(argv=None):
 
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
     if sys.version_info < (3,):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
-              or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
-              or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
-              or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-              or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
-              or (opts.useid and u'%(id)s.%(ext)s')
-              or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
-              or DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
+               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
+               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
+               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
+               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
+               (opts.useid and '%(id)s.%(ext)s') or
+               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
+               DEFAULT_OUTTMPL)
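The outtmpl chain is the classic `(condition and value) or fallback` idiom: the first truthy clause supplies the template, and DEFAULT_OUTTMPL catches everything else. The chosen template is later expanded with ordinary %-formatting against the extracted info dict; a sketch (the constant's value mirrors youtube-dl's default of the time, and the field values are made up):

    DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
    info = {'title': 'Some video', 'id': 'abc123', 'ext': 'mp4'}
    print(DEFAULT_OUTTMPL % info)  # Some video-abc123.mp4
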
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
-        parser.error(u'Cannot download a video and extract audio into the same'
-                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
-                     u' template'.format(outtmpl))
+        parser.error('Cannot download a video and extract audio into the same'
+                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
+                     ' template'.format(outtmpl))
 
-    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
-    download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
+    any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
+    any_printing = opts.print_json
+    download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
 
+    # PostProcessors
+    postprocessors = []
+    # Add the metadata pp first, the other pps will copy it
+    if opts.metafromtitle:
+        postprocessors.append({
+            'key': 'MetadataFromTitle',
+            'titleformat': opts.metafromtitle
+        })
+    if opts.addmetadata:
+        postprocessors.append({'key': 'FFmpegMetadata'})
+    if opts.extractaudio:
+        postprocessors.append({
+            'key': 'FFmpegExtractAudio',
+            'preferredcodec': opts.audioformat,
+            'preferredquality': opts.audioquality,
+            'nopostoverwrites': opts.nopostoverwrites,
+        })
+    if opts.recodevideo:
+        postprocessors.append({
+            'key': 'FFmpegVideoConvertor',
+            'preferedformat': opts.recodevideo,
+        })
+    if opts.convertsubtitles:
+        postprocessors.append({
+            'key': 'FFmpegSubtitlesConvertor',
+            'format': opts.convertsubtitles,
+        })
+    if opts.embedsubtitles:
+        postprocessors.append({
+            'key': 'FFmpegEmbedSubtitle',
+        })
+    if opts.xattrs:
+        postprocessors.append({'key': 'XAttrMetadata'})
+    if opts.embedthumbnail:
+        if not opts.addmetadata:
+            postprocessors.append({'key': 'FFmpegAudioFix'})
+        postprocessors.append({'key': 'AtomicParsley'})
+    # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
+    # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
+    if opts.exec_cmd:
+        postprocessors.append({
+            'key': 'ExecAfterDownload',
+            'verboseOutput': opts.verbose,
+            'exec_cmd': opts.exec_cmd,
+        })
+    if opts.xattr_set_filesize:
+        try:
+            import xattr
+            xattr  # Confuse flake8
+        except ImportError:
+            parser.error('setting filesize xattr requested but python-xattr is not available')
+    external_downloader_args = None
+    if opts.external_downloader_args:
+        external_downloader_args = shlex.split(opts.external_downloader_args)
+    match_filter = (
+        None if opts.match_filter is None
+        else match_filter_func(opts.match_filter))
 
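Each dict in postprocessors names a postprocessor through its 'key' and carries that postprocessor's constructor arguments; YoutubeDL instantiates them in list order. For instance, a run like `youtube-dl -x --audio-format mp3 --add-metadata` would produce roughly this list (a sketch with the values spelled out instead of read from opts):

    postprocessors = [
        {'key': 'FFmpegMetadata'},
        {
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '5',
            'nopostoverwrites': False,
        },
    ]
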
     ydl_opts = {
         'usenetrc': opts.usenetrc,
         'username': opts.username,
         'password': opts.password,
+        'twofactor': opts.twofactor,
         'videopassword': opts.videopassword,
-        'quiet': (opts.quiet or any_printing),
+        'quiet': (opts.quiet or any_getting or any_printing),
         'no_warnings': opts.no_warnings,
         'forceurl': opts.geturl,
         'forcetitle': opts.gettitle,
@@ -740,9 +284,10 @@ def _real_main(argv=None):
         'forceduration': opts.getduration,
         'forcefilename': opts.getfilename,
         'forceformat': opts.getformat,
-        'forcejson': opts.dumpjson,
-        'simulate': opts.simulate,
-        'skip_download': (opts.skip_download or opts.simulate or any_printing),
+        'forcejson': opts.dumpjson or opts.print_json,
+        'dump_single_json': opts.dump_single_json,
+        'simulate': opts.simulate or any_getting,
+        'skip_download': opts.skip_download,
         'format': opts.format,
         'format_limit': opts.format_limit,
         'listformats': opts.listformats,
@@ -752,7 +297,7 @@ def _real_main(argv=None):
         'ignoreerrors': opts.ignoreerrors,
         'ratelimit': opts.ratelimit,
         'nooverwrites': opts.nooverwrites,
-        'retries': opts.retries,
+        'retries': opts_retries,
         'buffersize': opts.buffersize,
         'noresizebuffer': opts.noresizebuffer,
         'continuedl': opts.continue_dl,
@@ -760,6 +305,7 @@ def _real_main(argv=None):
         'progress_with_newline': opts.progress_with_newline,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
+        'playlistreverse': opts.playlist_reverse,
         'noplaylist': opts.noplaylist,
         'logtostderr': opts.outtmpl == '-',
         'consoletitle': opts.consoletitle,
@@ -769,6 +315,7 @@ def _real_main(argv=None):
         'writeannotations': opts.writeannotations,
         'writeinfojson': opts.writeinfojson,
         'writethumbnail': opts.writethumbnail,
+        'write_all_thumbnails': opts.write_all_thumbnails,
         'writesubtitles': opts.writesubtitles,
         'writeautomaticsub': opts.writeautomaticsub,
         'allsubtitles': opts.allsubtitles,
@@ -805,47 +352,52 @@ def _real_main(argv=None):
         'default_search': opts.default_search,
         'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
         'encoding': opts.encoding,
+        'exec_cmd': opts.exec_cmd,
+        'extract_flat': opts.extract_flat,
+        'merge_output_format': opts.merge_output_format,
+        'postprocessors': postprocessors,
+        'fixup': opts.fixup,
+        'source_address': opts.source_address,
+        'call_home': opts.call_home,
+        'sleep_interval': opts.sleep_interval,
+        'external_downloader': opts.external_downloader,
+        'list_thumbnails': opts.list_thumbnails,
+        'playlist_items': opts.playlist_items,
+        'xattr_set_filesize': opts.xattr_set_filesize,
+        'match_filter': match_filter,
+        'no_color': opts.no_color,
+        'ffmpeg_location': opts.ffmpeg_location,
+        'hls_prefer_native': opts.hls_prefer_native,
+        'external_downloader_args': external_downloader_args,
+        'cn_verification_proxy': opts.cn_verification_proxy,
     }
 
     with YoutubeDL(ydl_opts) as ydl:
-        ydl.print_debug_header()
-        ydl.add_default_info_extractors()
-
-        # PostProcessors
-        # Add the metadata pp first, the other pps will copy it
-        if opts.addmetadata:
-            ydl.add_post_processor(FFmpegMetadataPP())
-        if opts.extractaudio:
-            ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
-        if opts.recodevideo:
-            ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
-        if opts.embedsubtitles:
-            ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
-        if opts.xattrs:
-            ydl.add_post_processor(XAttrMetadataPP())
-        if opts.embedthumbnail:
-            if not opts.addmetadata:
-                ydl.add_post_processor(FFmpegAudioFixPP())
-            ydl.add_post_processor(AtomicParsleyPP())
-
         # Update version
         if opts.update_self:
             update_self(ydl.to_screen, opts.verbose)
 
+        # Remove cache dir
+        if opts.rm_cachedir:
+            ydl.cache.remove()
+
         # Maybe do nothing
         if (len(all_urls) < 1) and (opts.load_info_filename is None):
-            if not opts.update_self:
-                parser.error(u'you must provide at least one URL')
-            else:
+            if opts.update_self or opts.rm_cachedir:
                 sys.exit()
 
+            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
+            parser.error(
+                'You must provide at least one URL.\n'
+                'Type youtube-dl --help to see a list of all options.')
 
         try:
             if opts.load_info_filename is not None:
                 retcode = ydl.download_with_info_file(opts.load_info_filename)
             else:
                 retcode = ydl.download(all_urls)
         except MaxDownloadsReached:
-            ydl.to_screen(u'--max-download limit reached, aborting.')
+            ydl.to_screen('--max-download limit reached, aborting.')
             retcode = 101
 
     sys.exit(retcode)
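The same ydl_opts dict drives the embedding API, so a script can reuse this flow without going through _real_main at all. A minimal sketch (the URL is the test video youtube-dl's own suite commonly uses; any supported URL works):

    from youtube_dl import YoutubeDL

    ydl_opts = {
        'format': 'best',
        'outtmpl': '%(title)s-%(id)s.%(ext)s',
        'retries': 10,
    }
    with YoutubeDL(ydl_opts) as ydl:
        retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
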
@@ -857,6 +409,8 @@ def main(argv=None):
     except DownloadError:
         sys.exit(1)
     except SameFileError:
-        sys.exit(u'ERROR: fixed output name but more than one file to download')
+        sys.exit('ERROR: fixed output name but more than one file to download')
     except KeyboardInterrupt:
-        sys.exit(u'\nERROR: Interrupted by user')
+        sys.exit('\nERROR: Interrupted by user')
 
+__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']

youtube_dl/__main__.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Execute with
 # $ python youtube_dl/__main__.py (2.6+)

youtube_dl/aes.py
@@ -1,4 +1,4 @@
-__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
+from __future__ import unicode_literals
 
 import base64
 from math import ceil
@@ -7,6 +7,7 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 
 BLOCK_SIZE_BYTES = 16
 
 
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
@@ -32,6 +33,7 @@ def aes_ctr_decrypt(data, key, counter):
 
     return decrypted_data
 
 
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode
@@ -57,6 +59,7 @@ def aes_cbc_decrypt(data, key, iv):
 
     return decrypted_data
 
 
 def key_expansion(data):
     """
     Generate key schedule
@@ -91,6 +94,7 @@ def key_expansion(data):
 
     return data
 
 
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes
@@ -111,6 +115,7 @@ def aes_encrypt(data, expanded_key):
 
     return data
 
 
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes
@@ -131,6 +136,7 @@ def aes_decrypt(data, expanded_key):
 
     return data
 
 
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -157,6 +163,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
 
     class Counter:
         __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
 
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
@@ -241,15 +248,19 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 
 
 def sub_bytes(data):
     return [SBOX[x] for x in data]
 
 
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]
 
 
 def rotate(data):
     return data[1:] + [data[0]]
 
 
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
@@ -257,14 +268,17 @@ def key_schedule_core(data, rcon_iteration):
 
     return data
 
 
 def xor(data1, data2):
     return [x ^ y for x, y in zip(data1, data2)]
 
 
 def rijndael_mul(a, b):
     if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
 
 
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,6 +289,7 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed
 
 
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
@@ -282,9 +297,11 @@ def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
 
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 
 
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
@@ -292,6 +309,7 @@ def shift_rows(data):
             data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted
 
 
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
@@ -299,6 +317,7 @@ def shift_rows_inv(data):
             data_shifted.append(data[((column - row) & 0b11) * 4 + row])
     return data_shifted
 
 
 def inc(data):
     data = data[:]  # copy
     for i in range(len(data) - 1, -1, -1):
@@ -308,3 +327,5 @@ def inc(data):
             data[i] = data[i] + 1
             break
     return data
+
+__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
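The aes helpers operate on lists of ints (one byte per element) rather than byte strings. A sketch of a single-block round trip under that convention, assuming a 16-byte (AES-128) key; the values are arbitrary demo data:

    from youtube_dl.aes import aes_decrypt, aes_encrypt, key_expansion

    key = list(range(16))    # 128-bit key as an intlist
    block = list(range(16))  # one 16-byte plaintext block
    expanded = key_expansion(key)
    assert aes_decrypt(aes_encrypt(block, expanded), expanded) == block
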
youtube_dl/cache.py (new file, 93 lines)
@@ -0,0 +1,93 @@
+from __future__ import unicode_literals
+
+import errno
+import io
+import json
+import os
+import re
+import shutil
+import traceback
+
+from .compat import compat_expanduser, compat_getenv
+from .utils import write_json_file
+
+
+class Cache(object):
+    def __init__(self, ydl):
+        self._ydl = ydl
+
+    def _get_root_dir(self):
+        res = self._ydl.params.get('cachedir')
+        if res is None:
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
+            res = os.path.join(cache_root, 'youtube-dl')
+        return compat_expanduser(res)
+
+    def _get_cache_fn(self, section, key, dtype):
+        assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
+            'invalid section %r' % section
+        assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
+        return os.path.join(
+            self._get_root_dir(), section, '%s.%s' % (key, dtype))
+
+    @property
+    def enabled(self):
+        return self._ydl.params.get('cachedir') is not False
+
+    def store(self, section, key, data, dtype='json'):
+        assert dtype in ('json',)
+
+        if not self.enabled:
+            return
+
+        fn = self._get_cache_fn(section, key, dtype)
+        try:
+            try:
+                os.makedirs(os.path.dirname(fn))
+            except OSError as ose:
+                if ose.errno != errno.EEXIST:
+                    raise
+            write_json_file(data, fn)
+        except Exception:
+            tb = traceback.format_exc()
+            self._ydl.report_warning(
+                'Writing cache to %r failed: %s' % (fn, tb))
+
+    def load(self, section, key, dtype='json', default=None):
+        assert dtype in ('json',)
+
+        if not self.enabled:
+            return default
+
+        cache_fn = self._get_cache_fn(section, key, dtype)
+        try:
+            try:
+                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
+                    return json.load(cachef)
+            except ValueError:
+                try:
+                    file_size = os.path.getsize(cache_fn)
+                except (OSError, IOError) as oe:
+                    file_size = str(oe)
+                self._ydl.report_warning(
+                    'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
+        except IOError:
+            pass  # No cache available
+
+        return default
+
+    def remove(self):
+        if not self.enabled:
+            self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
+            return
+
+        cachedir = self._get_root_dir()
+        if not any((term in cachedir) for term in ('cache', 'tmp')):
+            raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
+
+        self._ydl.to_screen(
+            'Removing cache dir %s .' % cachedir, skip_eol=True)
+        if os.path.exists(cachedir):
+            self._ydl.to_screen('.', skip_eol=True)
+            shutil.rmtree(cachedir)
+        self._ydl.to_screen('.')
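Cache only touches a small slice of the YoutubeDL interface: params plus the two reporting methods used above. That makes it easy to exercise on its own; a sketch with a hypothetical stand-in host object:

    from youtube_dl.cache import Cache

    class FakeYDL(object):  # hypothetical minimal host object
        params = {'cachedir': '/tmp/ydl-cache-demo'}

        def report_warning(self, msg):
            print('WARNING: ' + msg)

        def to_screen(self, msg, skip_eol=False):
            print(msg)

    cache = Cache(FakeYDL())
    cache.store('youtube-sigfuncs', 'demo-key', {'algo': [1, 2, 3]})
    print(cache.load('youtube-sigfuncs', 'demo-key'))             # {'algo': [1, 2, 3]}
    print(cache.load('youtube-sigfuncs', 'missing', default={}))  # {}
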
youtube_dl/compat.py (new file, 428 lines)
@@ -0,0 +1,428 @@
+from __future__ import unicode_literals
+
+import collections
+import getpass
+import optparse
+import os
+import re
+import shutil
+import socket
+import subprocess
+import sys
+
+
+try:
+    import urllib.request as compat_urllib_request
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_request
+
+try:
+    import urllib.error as compat_urllib_error
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_error
+
+try:
+    import urllib.parse as compat_urllib_parse
+except ImportError:  # Python 2
+    import urllib as compat_urllib_parse
+
+try:
+    from urllib.parse import urlparse as compat_urllib_parse_urlparse
+except ImportError:  # Python 2
+    from urlparse import urlparse as compat_urllib_parse_urlparse
+
+try:
+    import urllib.parse as compat_urlparse
+except ImportError:  # Python 2
+    import urlparse as compat_urlparse
+
+try:
+    import http.cookiejar as compat_cookiejar
+except ImportError:  # Python 2
+    import cookielib as compat_cookiejar
+
+try:
+    import html.entities as compat_html_entities
+except ImportError:  # Python 2
+    import htmlentitydefs as compat_html_entities
+
+try:
+    import html.parser as compat_html_parser
+except ImportError:  # Python 2
+    import HTMLParser as compat_html_parser
+
+try:
+    import http.client as compat_http_client
+except ImportError:  # Python 2
+    import httplib as compat_http_client
+
+try:
+    from urllib.error import HTTPError as compat_HTTPError
+except ImportError:  # Python 2
+    from urllib2 import HTTPError as compat_HTTPError
+
+try:
+    from urllib.request import urlretrieve as compat_urlretrieve
+except ImportError:  # Python 2
+    from urllib import urlretrieve as compat_urlretrieve
+
+
+try:
+    from subprocess import DEVNULL
+    compat_subprocess_get_DEVNULL = lambda: DEVNULL
+except ImportError:
+    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
+
+try:
+    import http.server as compat_http_server
+except ImportError:
+    import BaseHTTPServer as compat_http_server
+
+try:
+    from urllib.parse import unquote as compat_urllib_parse_unquote
+except ImportError:
+    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
+        if string == '':
+            return string
+        res = string.split('%')
+        if len(res) == 1:
+            return string
+        if encoding is None:
+            encoding = 'utf-8'
+        if errors is None:
+            errors = 'replace'
+        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
+        pct_sequence = b''
+        string = res[0]
+        for item in res[1:]:
+            try:
+                if not item:
+                    raise ValueError
+                pct_sequence += item[:2].decode('hex')
+                rest = item[2:]
+                if not rest:
+                    # This segment was just a single percent-encoded character.
+                    # May be part of a sequence of code units, so delay decoding.
+                    # (Stored in pct_sequence).
+                    continue
+            except ValueError:
+                rest = '%' + item
+            # Encountered non-percent-encoded characters. Flush the current
+            # pct_sequence.
+            string += pct_sequence.decode(encoding, errors) + rest
+            pct_sequence = b''
+        if pct_sequence:
+            # Flush the final pct_sequence
+            string += pct_sequence.decode(encoding, errors)
+        return string
+
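Whichever branch wins, the resulting function percent-decodes UTF-8 sequences the same way on both Python generations:

    print(compat_urllib_parse_unquote('v%C3%ADdeo%20%E2%9C%93'))  # vídeo ✓
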
+try:
+    compat_str = unicode  # Python 2
+except NameError:
+    compat_str = str
+
+try:
+    compat_basestring = basestring  # Python 2
+except NameError:
+    compat_basestring = str
+
+try:
+    compat_chr = unichr  # Python 2
+except NameError:
+    compat_chr = chr
+
+try:
+    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
+except ImportError:  # Python 2.6
+    from xml.parsers.expat import ExpatError as compat_xml_parse_error
+
+
+try:
+    from urllib.parse import parse_qs as compat_parse_qs
+except ImportError:  # Python 2
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+
+    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
+                   encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, compat_str
+        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+        r = []
+        for name_value in pairs:
+            if not name_value and not strict_parsing:
+                continue
+            nv = name_value.split('=', 1)
+            if len(nv) != 2:
+                if strict_parsing:
+                    raise ValueError("bad query field: %r" % (name_value,))
+                # Handle case of a control-name with no equal sign
+                if keep_blank_values:
+                    nv.append('')
+                else:
+                    continue
+            if len(nv[1]) or keep_blank_values:
+                name = nv[0].replace('+', ' ')
+                name = compat_urllib_parse_unquote(
+                    name, encoding=encoding, errors=errors)
+                name = _coerce_result(name)
+                value = nv[1].replace('+', ' ')
+                value = compat_urllib_parse_unquote(
+                    value, encoding=encoding, errors=errors)
+                value = _coerce_result(value)
+                r.append((name, value))
+        return r
+
+    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
+                        encoding='utf-8', errors='replace'):
+        parsed_result = {}
+        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
+                           encoding=encoding, errors=errors)
+        for name, value in pairs:
+            if name in parsed_result:
+                parsed_result[name].append(value)
+            else:
+                parsed_result[name] = [value]
+        return parsed_result
+
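compat_parse_qs therefore behaves like the Python 3 parse_qs everywhere: repeated names accumulate into lists, and blank values are dropped unless asked for:

    qs = 'v=abc123&t=1m&empty=&t=2m'
    print(compat_parse_qs(qs))
    # {'v': ['abc123'], 't': ['1m', '2m']}
    print(compat_parse_qs(qs, keep_blank_values=True))
    # {'v': ['abc123'], 't': ['1m', '2m'], 'empty': ['']}
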
+try:
+    from shlex import quote as shlex_quote
+except ImportError:  # Python < 3.3
+    def shlex_quote(s):
+        if re.match(r'^[-_\w./]+$', s):
+            return s
+        else:
+            return "'" + s.replace("'", "'\"'\"'") + "'"
+
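The fallback mirrors shlex.quote: names matching the safe character class pass through untouched, anything else is wrapped in single quotes with embedded quotes escaped:

    print(shlex_quote('safe_name.mp4'))    # safe_name.mp4  (left bare)
    print(shlex_quote('video title.mp4'))  # 'video title.mp4'
    print(shlex_quote("it's here"))        # 'it'"'"'s here'
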
|
|
||||||
|
def compat_ord(c):
    if type(c) is int:
        return c
    else:
        return ord(c)


if sys.version_info >= (3, 0):
    compat_getenv = os.getenv
    compat_expanduser = os.path.expanduser
else:
    # Environment variables should be decoded with filesystem encoding.
    # Otherwise it will fail if any non-ASCII characters are present (see #3854 #3217 #2918)

    def compat_getenv(key, default=None):
        from .utils import get_filesystem_encoding
        env = os.getenv(key, default)
        if env:
            env = env.decode(get_filesystem_encoding())
        return env

    # HACK: The default implementations of os.path.expanduser from cpython do not decode
    # environment variables with filesystem encoding. We will work around this by
    # providing adjusted implementations.
    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
    # for different platforms with correct environment variables decoding.

    if os.name == 'posix':
        def compat_expanduser(path):
            """Expand ~ and ~user constructions.  If user or $HOME is unknown,
            do nothing."""
            if not path.startswith('~'):
                return path
            i = path.find('/', 1)
            if i < 0:
                i = len(path)
            if i == 1:
                if 'HOME' not in os.environ:
                    import pwd
                    userhome = pwd.getpwuid(os.getuid()).pw_dir
                else:
                    userhome = compat_getenv('HOME')
            else:
                import pwd
                try:
                    pwent = pwd.getpwnam(path[1:i])
                except KeyError:
                    return path
                userhome = pwent.pw_dir
            userhome = userhome.rstrip('/')
            return (userhome + path[i:]) or '/'
    elif os.name == 'nt' or os.name == 'ce':
        def compat_expanduser(path):
            """Expand ~ and ~user constructs.

            If user or $HOME is unknown, do nothing."""
            if path[:1] != '~':
                return path
            i, n = 1, len(path)
            while i < n and path[i] not in '/\\':
                i = i + 1

            if 'HOME' in os.environ:
                userhome = compat_getenv('HOME')
            elif 'USERPROFILE' in os.environ:
                userhome = compat_getenv('USERPROFILE')
            elif 'HOMEPATH' not in os.environ:
                return path
            else:
                try:
                    drive = compat_getenv('HOMEDRIVE')
                except KeyError:
                    drive = ''
                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))

            if i != 1:  # ~user
                userhome = os.path.join(os.path.dirname(userhome), path[1:i])

            return userhome + path[i:]
    else:
        compat_expanduser = os.path.expanduser

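# On Python 2 both shims return unicode; assuming a hypothetical UTF-8
# filesystem and a HOME of '/home/andré', for example:
#
#     >>> compat_getenv('HOME')
#     u'/home/andr\xe9'
#     >>> compat_expanduser('~/videos')
#     u'/home/andr\xe9/videos'
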
if sys.version_info < (3, 0):
    def compat_print(s):
        from .utils import preferredencoding
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        assert isinstance(s, compat_str)
        print(s)


try:
    subprocess_check_output = subprocess.check_output
except AttributeError:
    def subprocess_check_output(*args, **kwargs):
        assert 'input' not in kwargs
        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
        output, _ = p.communicate()
        ret = p.poll()
        if ret:
            raise subprocess.CalledProcessError(ret, p.args, output=output)
        return output

if sys.version_info < (3, 0) and sys.platform == 'win32':
    def compat_getpass(prompt, *args, **kwargs):
        if isinstance(prompt, compat_str):
            from .utils import preferredencoding
            prompt = prompt.encode(preferredencoding())
        return getpass.getpass(prompt, *args, **kwargs)
else:
    compat_getpass = getpass.getpass

# Old 2.6 and 2.7 releases require kwargs to be bytes
try:
    def _testfunc(x):
        pass
    _testfunc(**{'x': 0})
except TypeError:
    def compat_kwargs(kwargs):
        return dict((bytes(k), v) for k, v in kwargs.items())
else:
    compat_kwargs = lambda kwargs: kwargs

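# A minimal sketch of the call pattern (illustrative only): wrap keyword
# dicts at the call site so the keys are coerced to bytes where required.
#
#     def fetch(retries=0):
#         pass
#
#     fetch(**compat_kwargs({'retries': 3}))
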
if sys.version_info < (2, 7):
    def compat_socket_create_connection(address, timeout, source_address=None):
        host, port = address
        err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            except socket.error as _:
                err = _
                if sock is not None:
                    sock.close()
        if err is not None:
            raise err
        else:
            raise socket.error("getaddrinfo returns an empty list")
else:
    compat_socket_create_connection = socket.create_connection


# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
    op = optparse.OptionParser()
    og = optparse.OptionGroup(op, 'foo')
    try:
        og.add_option('-t')
    except TypeError:
        real_add_option = optparse.OptionGroup.add_option

        def _compat_add_option(self, *args, **kwargs):
            enc = lambda v: (
                v.encode('ascii', 'replace') if isinstance(v, compat_str)
                else v)
            bargs = [enc(a) for a in args]
            bkwargs = dict(
                (k, enc(v)) for k, v in kwargs.items())
            return real_add_option(self, *bargs, **bkwargs)
        optparse.OptionGroup.add_option = _compat_add_option

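# Intended usage (a sketch, assuming the caller builds its own parser): run
# the workaround once at startup, before any OptionGroup is populated.
#
#     workaround_optparse_bug9161()
#     parser = optparse.OptionParser()
#     group = optparse.OptionGroup(parser, 'General Options')
#     group.add_option('--retries', dest='retries')  # safe on affected Pythons
#     parser.add_option_group(group)
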
if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
    compat_get_terminal_size = shutil.get_terminal_size
else:
    _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

    def compat_get_terminal_size():
        columns = compat_getenv('COLUMNS', None)
        if columns:
            columns = int(columns)
        else:
            columns = None
        lines = compat_getenv('LINES', None)
        if lines:
            lines = int(lines)
        else:
            lines = None

        try:
            sp = subprocess.Popen(
                ['stty', 'size'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = sp.communicate()
            lines, columns = map(int, out.split())
        except Exception:
            pass
        return _terminal_size(columns, lines)

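# Behaviour sketch (illustrative): the fallback first reads COLUMNS/LINES,
# then lets a successful `stty size` call overwrite both fields, e.g.:
#
#     >>> compat_get_terminal_size()
#     terminal_size(columns=80, lines=24)
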
__all__ = [
    'compat_HTTPError',
    'compat_basestring',
    'compat_chr',
    'compat_cookiejar',
    'compat_expanduser',
    'compat_get_terminal_size',
    'compat_getenv',
    'compat_getpass',
    'compat_html_entities',
    'compat_html_parser',
    'compat_http_client',
    'compat_http_server',
    'compat_kwargs',
    'compat_ord',
    'compat_parse_qs',
    'compat_print',
    'compat_socket_create_connection',
    'compat_str',
    'compat_subprocess_get_DEVNULL',
    'compat_urllib_error',
    'compat_urllib_parse',
    'compat_urllib_parse_unquote',
    'compat_urllib_parse_urlparse',
    'compat_urllib_request',
    'compat_urlparse',
    'compat_urlretrieve',
    'compat_xml_parse_error',
    'shlex_quote',
    'subprocess_check_output',
    'workaround_optparse_bug9161',
]
youtube_dl/downloader/__init__.py
@@ -1,29 +1,46 @@
 from __future__ import unicode_literals

 from .common import FileDownloader
+from .external import get_external_downloader
+from .f4m import F4mFD
 from .hls import HlsFD
+from .hls import NativeHlsFD
 from .http import HttpFD
 from .mplayer import MplayerFD
 from .rtmp import RtmpFD
-from .f4m import F4mFD

 from ..utils import (
-    determine_ext,
+    determine_protocol,
 )

+PROTOCOL_MAP = {
+    'rtmp': RtmpFD,
+    'm3u8_native': NativeHlsFD,
+    'm3u8': HlsFD,
+    'mms': MplayerFD,
+    'rtsp': MplayerFD,
+    'f4m': F4mFD,
+}

-def get_suitable_downloader(info_dict):
+
+def get_suitable_downloader(info_dict, params={}):
     """Get the downloader class that can handle the info dict."""
-    url = info_dict['url']
-    protocol = info_dict.get('protocol')
+    protocol = determine_protocol(info_dict)
+    info_dict['protocol'] = protocol

-    if url.startswith('rtmp'):
-        return RtmpFD
-    if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
-        return HlsFD
-    if url.startswith('mms') or url.startswith('rtsp'):
-        return MplayerFD
-    if determine_ext(url) == 'f4m':
-        return F4mFD
-    else:
-        return HttpFD
+    external_downloader = params.get('external_downloader')
+    if external_downloader is not None:
+        ed = get_external_downloader(external_downloader)
+        if ed.supports(info_dict):
+            return ed
+
+    if protocol == 'm3u8' and params.get('hls_prefer_native'):
+        return NativeHlsFD
+
+    return PROTOCOL_MAP.get(protocol, HttpFD)


 __all__ = [
     'get_suitable_downloader',
     'FileDownloader',
 ]
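A usage sketch of the new dispatch (illustrative; the downloader name
'aria2c' is just an example value a user might configure):

    info_dict = {'url': 'http://example.com/video.m3u8', 'protocol': 'm3u8'}
    # the native HLS downloader wins when hls_prefer_native is set
    assert get_suitable_downloader(info_dict, {'hls_prefer_native': True}) is NativeHlsFD
    # aria2c only supports http/https/ftp/ftps, so m3u8 falls through to HlsFD
    assert get_suitable_downloader(info_dict, {'external_downloader': 'aria2c'}) is HlsFD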
youtube_dl/downloader/common.py
@@ -1,10 +1,12 @@
+from __future__ import division, unicode_literals
+
 import os
 import re
 import sys
 import time

+from ..compat import compat_str
 from ..utils import (
-    compat_str,
     encodeFilename,
     format_bytes,
     timeconvert,
@@ -38,10 +40,15 @@ class FileDownloader(object):
     test:               Download only first bytes to test the downloader.
     min_filesize:       Skip files smaller than this size
     max_filesize:       Skip files larger than this size
+    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
+                        (experimental)
+    external_downloader_args:  A list of additional command-line arguments for the
+                        external downloader.

     Subclasses of this one must re-define the real_download method.
     """

+    _TEST_FILE_SIZE = 10241
     params = None

     def __init__(self, ydl, params):
@@ -49,6 +56,7 @@ class FileDownloader(object):
         self.ydl = ydl
         self._progress_hooks = []
         self.params = params
+        self.add_progress_hook(self.report_progress)

     @staticmethod
     def format_seconds(seconds):
@@ -77,6 +85,8 @@ class FileDownloader(object):
     def calc_eta(start, now, total, current):
         if total is None:
             return None
+        if now is None:
+            now = time.time()
         dif = now - start
         if current == 0 or dif < 0.001:  # One millisecond
             return None
@@ -143,29 +153,30 @@ class FileDownloader(object):
     def report_error(self, *args, **kargs):
         self.ydl.report_error(*args, **kargs)

-    def slow_down(self, start_time, byte_counter):
+    def slow_down(self, start_time, now, byte_counter):
         """Sleep if the download speed is over the rate limit."""
         rate_limit = self.params.get('ratelimit', None)
         if rate_limit is None or byte_counter == 0:
             return
-        now = time.time()
+        if now is None:
+            now = time.time()
         elapsed = now - start_time
         if elapsed <= 0.0:
             return
         speed = float(byte_counter) / elapsed
         if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
+            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))

     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
+        if self.params.get('nopart', False) or filename == '-' or \
                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
             return filename
-        return filename + u'.part'
+        return filename + '.part'

     def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
+        if filename.endswith('.part'):
+            return filename[:-len('.part')]
         return filename

     def try_rename(self, old_filename, new_filename):
@@ -174,7 +185,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.report_error(u'unable to rename file: %s' % compat_str(err))
+            self.report_error('unable to rename file: %s' % compat_str(err))

     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -193,16 +204,16 @@ class FileDownloader(object):
             return
         try:
             os.utime(filename, (time.time(), filetime))
-        except:
+        except Exception:
             pass
         return filetime

     def report_destination(self, filename):
         """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
+        self.to_screen('[download] Destination: ' + filename)

     def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
+        fullmsg = '[download] ' + msg
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
@@ -210,76 +221,110 @@ class FileDownloader(object):
             prev_len = getattr(self, '_report_progress_prev_line_length',
                                0)
             if prev_len > len(fullmsg):
-                fullmsg += u' ' * (prev_len - len(fullmsg))
+                fullmsg += ' ' * (prev_len - len(fullmsg))
             self._report_progress_prev_line_length = len(fullmsg)
-            clear_line = u'\r'
+            clear_line = '\r'
         else:
-            clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+            clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
         self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
+        self.to_console_title('youtube-dl ' + msg)

-    def report_progress(self, percent, data_len_str, speed, eta):
-        """Report download progress."""
-        if self.params.get('noprogress', False):
-            return
-        if eta is not None:
-            eta_str = self.format_eta(eta)
-        else:
-            eta_str = 'Unknown ETA'
-        if percent is not None:
-            percent_str = self.format_percent(percent)
-        else:
-            percent_str = 'Unknown %'
-        speed_str = self.format_speed(speed)
-
-        msg = (u'%s of %s at %s ETA %s' %
-               (percent_str, data_len_str, speed_str, eta_str))
-        self._report_progress_status(msg)
-
-    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
-        if self.params.get('noprogress', False):
-            return
-        downloaded_str = format_bytes(downloaded_data_len)
-        speed_str = self.format_speed(speed)
-        elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
-        self._report_progress_status(msg)
-
-    def report_finish(self, data_len_str, tot_time):
-        """Report download finished."""
-        if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
-        else:
-            self._report_progress_status(
-                (u'100%% of %s in %s' %
-                 (data_len_str, self.format_seconds(tot_time))),
-                is_last_line=True)
+    def report_progress(self, s):
+        if s['status'] == 'finished':
+            if self.params.get('noprogress', False):
+                self.to_screen('[download] Download completed')
+            else:
+                s['_total_bytes_str'] = format_bytes(s['total_bytes'])
+                if s.get('elapsed') is not None:
+                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
+                    msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
+                else:
+                    msg_template = '100%% of %(_total_bytes_str)s'
+                self._report_progress_status(
+                    msg_template % s, is_last_line=True)
+
+        if self.params.get('noprogress'):
+            return
+
+        if s['status'] != 'downloading':
+            return
+
+        if s.get('eta') is not None:
+            s['_eta_str'] = self.format_eta(s['eta'])
+        else:
+            s['_eta_str'] = 'Unknown ETA'
+
+        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
+            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
+        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
+            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
+        else:
+            if s.get('downloaded_bytes') == 0:
+                s['_percent_str'] = self.format_percent(0)
+            else:
+                s['_percent_str'] = 'Unknown %'
+
+        if s.get('speed') is not None:
+            s['_speed_str'] = self.format_speed(s['speed'])
+        else:
+            s['_speed_str'] = 'Unknown speed'
+
+        if s.get('total_bytes') is not None:
+            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
+            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
+        elif s.get('total_bytes_estimate') is not None:
+            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
+            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
+        else:
+            if s.get('downloaded_bytes') is not None:
+                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
+                if s.get('elapsed'):
+                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
+                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
+                else:
+                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
+            else:
+                msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
+
+        self._report_progress_status(msg_template % s)
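For reference (illustrative), a typical status dict handed to report_progress
and to user progress hooks mid-download now looks like:

    {
        'status': 'downloading',
        'filename': 'video.mp4',
        'tmpfilename': 'video.mp4.part',
        'downloaded_bytes': 524288,
        'total_bytes': 1048576,  # or 'total_bytes_estimate' when only a guess
        'eta': 3,
        'speed': 174762.7,
        'elapsed': 3.0,
    }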
     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+        self.to_screen('[download] Resuming download at byte %s' % resume_len)

     def report_retry(self, count, retries):
         """Report retry in case of HTTP error 5xx"""
-        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')

     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
-        self.to_screen(u'[download] Unable to resume')
+        self.to_screen('[download] Unable to resume')

     def download(self, filename, info_dict):
         """Download to a filename using the info from info_dict
         Return True on success and False otherwise
         """
+
+        nooverwrites_and_exists = (
+            self.params.get('nooverwrites', False) and
+            os.path.exists(encodeFilename(filename))
+        )
+
+        continuedl_and_exists = (
+            self.params.get('continuedl', True) and
+            os.path.isfile(encodeFilename(filename)) and
+            not self.params.get('nopart', False)
+        )
+
         # Check file already present
-        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
+        if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
             self.report_file_already_downloaded(filename)
             self._hook_progress({
                 'filename': filename,
@@ -288,30 +333,43 @@ class FileDownloader(object):
             })
             return True

+        sleep_interval = self.params.get('sleep_interval')
+        if sleep_interval:
+            self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
+            time.sleep(sleep_interval)
+
         return self.real_download(filename, info_dict)

     def real_download(self, filename, info_dict):
         """Real download process. Redefine in subclasses."""
-        raise NotImplementedError(u'This method must be implemented by sublcasses')
+        raise NotImplementedError('This method must be implemented by subclasses')

     def _hook_progress(self, status):
         for ph in self._progress_hooks:
             ph(status)

     def add_progress_hook(self, ph):
-        """ ph gets called on download progress, with a dictionary with the entries
-        * filename: The final filename
-        * status: One of "downloading" and "finished"
-
-        It can also have some of the following entries:
-
-        * downloaded_bytes: Bytes on disks
-        * total_bytes: Total bytes, None if unknown
-        * tmpfilename: The filename we're currently writing to
-        * eta: The estimated time in seconds, None if unknown
-        * speed: The download speed in bytes/second, None if unknown
-
-        Hooks are guaranteed to be called at least once (with status "finished")
-        if the download is successful.
-        """
+        # See YoutubeDL.py (search for progress_hooks) for a description of
+        # this interface
         self._progress_hooks.append(ph)
+
+    def _debug_cmd(self, args, subprocess_encoding, exe=None):
+        if not self.params.get('verbose', False):
+            return
+
+        if exe is None:
+            exe = os.path.basename(args[0])
+
+        if subprocess_encoding:
+            str_args = [
+                a.decode(subprocess_encoding) if isinstance(a, bytes) else a
+                for a in args]
+        else:
+            str_args = args
+        try:
+            import pipes
+            shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
+        except ImportError:
+            shell_quote = repr
+        self.to_screen('[debug] %s command line: %s' % (
+            exe, shell_quote(str_args)))
youtube_dl/downloader/external.py (new file, 135 lines)
@@ -0,0 +1,135 @@
from __future__ import unicode_literals

import os.path
import subprocess
import sys

from .common import FileDownloader
from ..utils import (
    encodeFilename,
)


class ExternalFD(FileDownloader):
    def real_download(self, filename, info_dict):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        retval = self._call_downloader(tmpfilename, info_dict)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr('\n')
            self.report_error('%s exited with code %d' % (
                self.get_basename(), retval))
            return False

    @classmethod
    def get_basename(cls):
        return cls.__name__[:-2].lower()

    @property
    def exe(self):
        return self.params.get('external_downloader')

    @classmethod
    def supports(cls, info_dict):
        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')

    def _source_address(self, command_option):
        source_address = self.params.get('source_address')
        if source_address is None:
            return []
        return [command_option, source_address]

    def _configuration_args(self, default=[]):
        ex_args = self.params.get('external_downloader_args')
        if ex_args is None:
            return default
        assert isinstance(ex_args, list)
        return ex_args

    def _call_downloader(self, tmpfilename, info_dict):
        """ Either overwrite this or implement _make_cmd """
        cmd = self._make_cmd(tmpfilename, info_dict)

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
            # on Python 2.x
            # See http://stackoverflow.com/a/9951851/35070
            subprocess_encoding = sys.getfilesystemencoding()
            cmd = [a.encode(subprocess_encoding, 'ignore') for a in cmd]
        else:
            subprocess_encoding = None
        self._debug_cmd(cmd, subprocess_encoding)

        p = subprocess.Popen(
            cmd, stderr=subprocess.PIPE)
        _, stderr = p.communicate()
        if p.returncode != 0:
            self.to_stderr(stderr)
        return p.returncode


class CurlFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '--location', '-o', tmpfilename]
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--interface')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd


class WgetFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--bind-address')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd


class Aria2cFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-c']
        cmd += self._configuration_args([
            '--min-split-size', '1M', '--max-connection-per-server', '4'])
        dn = os.path.dirname(tmpfilename)
        if dn:
            cmd += ['--dir', dn]
        cmd += ['--out', os.path.basename(tmpfilename)]
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--interface')
        cmd += ['--', info_dict['url']]
        return cmd

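# For a hypothetical download of http://example.com/video.mp4 with a single
# User-Agent header and no extra configuration, the command assembled by
# Aria2cFD._make_cmd above comes out roughly as:
#
#     ['aria2c', '-c',
#      '--min-split-size', '1M', '--max-connection-per-server', '4',
#      '--out', 'video.mp4.part',
#      '--header', 'User-Agent: youtube-dl',
#      '--', 'http://example.com/video.mp4']
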
_BY_NAME = dict(
    (klass.get_basename(), klass)
    for name, klass in globals().items()
    if name.endswith('FD') and name != 'ExternalFD'
)


def list_external_downloaders():
    return sorted(_BY_NAME.keys())


def get_external_downloader(external_downloader):
    """ Given the name of the executable, see whether we support the given
        downloader. """
    bn = os.path.basename(external_downloader)
    return _BY_NAME[bn]
youtube_dl/downloader/f4m.py
@@ -1,4 +1,4 @@
-from __future__ import unicode_literals
+from __future__ import division, unicode_literals

 import base64
 import io
@@ -9,13 +9,16 @@ import xml.etree.ElementTree as etree

 from .common import FileDownloader
 from .http import HttpFD
+from ..compat import (
+    compat_urlparse,
+    compat_urllib_error,
+)
 from ..utils import (
     struct_pack,
     struct_unpack,
-    compat_urlparse,
-    format_bytes,
     encodeFilename,
     sanitize_open,
+    xpath_text,
 )
@@ -119,7 +122,8 @@ class FlvReader(io.BytesIO):

         self.read_unsigned_int()  # BootstrapinfoVersion
         # Profile,Live,Update,Reserved
-        self.read(1)
+        flags = self.read_unsigned_char()
+        live = flags & 0x20 != 0
         # time scale
         self.read_unsigned_int()
         # CurrentMediaTime
@@ -158,6 +162,7 @@ class FlvReader(io.BytesIO):
         return {
             'segments': segments,
             'fragments': fragments,
+            'live': live,
         }

     def read_bootstrap_info(self):
@@ -174,34 +179,47 @@ def build_fragments_list(boot_info):
     """ Return a list of (segment, fragment) for each fragment in the video """
     res = []
     segment_run_table = boot_info['segments'][0]
-    # I've only found videos with one segment
-    segment_run_entry = segment_run_table['segment_run'][0]
-    n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
-        res.append((1, frag_number))
+    fragments_counter = itertools.count(first_frag_number)
+    for segment, fragments_count in segment_run_table['segment_run']:
+        for _ in range(fragments_count):
+            res.append((segment, next(fragments_counter)))
+
+    if boot_info['live']:
+        res = res[-2:]
+
     return res


-def write_flv_header(stream, metadata):
-    """Writes the FLV header and the metadata to stream"""
+def write_unsigned_int(stream, val):
+    stream.write(struct_pack('!I', val))
+
+
+def write_unsigned_int_24(stream, val):
+    stream.write(struct_pack('!I', val)[1:])
+
+
+def write_flv_header(stream):
+    """Writes the FLV header to stream"""
     # FLV header
     stream.write(b'FLV\x01')
     stream.write(b'\x05')
     stream.write(b'\x00\x00\x00\x09')
-    # FLV File body
     stream.write(b'\x00\x00\x00\x00')
-    # FLVTAG
-    # Script data
-    stream.write(b'\x12')
-    # Size of the metadata with 3 bytes
-    stream.write(struct_pack('!L', len(metadata))[1:])
-    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
-    stream.write(metadata)
-    # Magic numbers extracted from the output files produced by AdobeHDS.php
-    #(https://github.com/K-S-V/Scripts)
-    stream.write(b'\x00\x00\x01\x73')
+
+
+def write_metadata_tag(stream, metadata):
+    """Writes optional metadata tag to stream"""
+    SCRIPT_TAG = b'\x12'
+    FLV_TAG_HEADER_LEN = 11
+
+    if metadata:
+        stream.write(SCRIPT_TAG)
+        write_unsigned_int_24(stream, len(metadata))
+        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
+        stream.write(metadata)
+        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))


 def _add_ns(prop):
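A behaviour sketch (illustrative): with one segment run of three fragments
starting at fragment 1, the non-live case enumerates every (segment, fragment)
pair, while a live bootstrap keeps only the last two.

    >>> boot_info = {'segments': [{'segment_run': [(1, 3)]}],
    ...              'fragments': [{'fragments': [{'first': 1}]}],
    ...              'live': False}
    >>> build_fragments_list(boot_info)
    [(1, 1), (1, 2), (1, 3)]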
@@ -218,72 +236,161 @@ class F4mFD(FileDownloader):
     A downloader for f4m manifests or AdobeHDS.
     """

+    def _get_unencrypted_media(self, doc):
+        media = doc.findall(_add_ns('media'))
+        if not media:
+            self.report_error('No media found')
+        for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
+                  doc.findall(_add_ns('drmAdditionalHeaderSet'))):
+            # If id attribute is missing it's valid for all media nodes
+            # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
+            if 'id' not in e.attrib:
+                self.report_error('Missing ID in f4m DRM')
+        media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
+                            'drmAdditionalHeaderSetId' not in e.attrib,
+                            media))
+        if not media:
+            self.report_error('Unsupported DRM')
+        return media
+
+    def _get_bootstrap_from_url(self, bootstrap_url):
+        bootstrap = self.ydl.urlopen(bootstrap_url).read()
+        return read_bootstrap_info(bootstrap)
+
+    def _update_live_fragments(self, bootstrap_url, latest_fragment):
+        fragments_list = []
+        retries = 30
+        while (not fragments_list) and (retries > 0):
+            boot_info = self._get_bootstrap_from_url(bootstrap_url)
+            fragments_list = build_fragments_list(boot_info)
+            fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
+            if not fragments_list:
+                # Retry after a while
+                time.sleep(5.0)
+                retries -= 1
+
+        if not fragments_list:
+            self.report_error('Failed to update fragments')
+
+        return fragments_list
+
+    def _parse_bootstrap_node(self, node, base_url):
+        if node.text is None:
+            bootstrap_url = compat_urlparse.urljoin(
+                base_url, node.attrib['url'])
+            boot_info = self._get_bootstrap_from_url(bootstrap_url)
+        else:
+            bootstrap_url = None
+            bootstrap = base64.b64decode(node.text.encode('ascii'))
+            boot_info = read_bootstrap_info(bootstrap)
+        return (boot_info, bootstrap_url)
+
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
+        requested_bitrate = info_dict.get('tbr')
         self.to_screen('[download] Downloading f4m manifest')
         manifest = self.ydl.urlopen(man_url).read()
-        self.report_destination(filename)
-        http_dl = HttpQuietDownloader(self.ydl,
-            {
-                'continuedl': True,
-                'quiet': True,
-                'noprogress': True,
-                'test': self.params.get('test', False),
-            })

         doc = etree.fromstring(manifest)
-        formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
-        formats = sorted(formats, key=lambda f: f[0])
-        rate, media = formats[-1]
+        formats = [(int(f.attrib.get('bitrate', -1)), f)
+                   for f in self._get_unencrypted_media(doc)]
+        if requested_bitrate is None:
+            # get the best format
+            formats = sorted(formats, key=lambda f: f[0])
+            rate, media = formats[-1]
+        else:
+            rate, media = list(filter(
+                lambda f: int(f[0]) == requested_bitrate, formats))[0]
+
         base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
-        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
-        metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
-        boot_info = read_bootstrap_info(bootstrap)
+        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
+        boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, base_url)
+        live = boot_info['live']
+        metadata_node = media.find(_add_ns('metadata'))
+        if metadata_node is not None:
+            metadata = base64.b64decode(metadata_node.text.encode('ascii'))
+        else:
+            metadata = None
+
         fragments_list = build_fragments_list(boot_info)
         if self.params.get('test', False):
             # We only download the first fragment
             fragments_list = fragments_list[:1]
         total_frags = len(fragments_list)
+        # For some akamai manifests we'll need to add a query to the fragment url
+        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
+
+        self.report_destination(filename)
+        http_dl = HttpQuietDownloader(
+            self.ydl,
+            {
+                'continuedl': True,
+                'quiet': True,
+                'noprogress': True,
+                'ratelimit': self.params.get('ratelimit', None),
+                'test': self.params.get('test', False),
+            }
+        )
         tmpfilename = self.temp_name(filename)
         (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
-        write_flv_header(dest_stream, metadata)
+
+        write_flv_header(dest_stream)
+        if not live:
+            write_metadata_tag(dest_stream, metadata)

         # This dict stores the download progress, it's updated by the progress
         # hook
         state = {
+            'status': 'downloading',
             'downloaded_bytes': 0,
-            'frag_counter': 0,
+            'frag_index': 0,
+            'frag_count': total_frags,
+            'filename': filename,
+            'tmpfilename': tmpfilename,
         }
         start = time.time()

-        def frag_progress_hook(status):
-            frag_total_bytes = status.get('total_bytes', 0)
-            estimated_size = (state['downloaded_bytes'] +
-                (total_frags - state['frag_counter']) * frag_total_bytes)
-            if status['status'] == 'finished':
+        def frag_progress_hook(s):
+            if s['status'] not in ('downloading', 'finished'):
+                return
+
+            frag_total_bytes = s.get('total_bytes', 0)
+            if s['status'] == 'finished':
                 state['downloaded_bytes'] += frag_total_bytes
-                state['frag_counter'] += 1
-                progress = self.calc_percent(state['frag_counter'], total_frags)
-                byte_counter = state['downloaded_bytes']
+                state['frag_index'] += 1
+
+            estimated_size = (
+                (state['downloaded_bytes'] + frag_total_bytes) /
+                (state['frag_index'] + 1) * total_frags)
+            time_now = time.time()
+            state['total_bytes_estimate'] = estimated_size
+            state['elapsed'] = time_now - start
+
+            if s['status'] == 'finished':
+                progress = self.calc_percent(state['frag_index'], total_frags)
             else:
-                frag_downloaded_bytes = status['downloaded_bytes']
-                byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
+                frag_downloaded_bytes = s['downloaded_bytes']
                 frag_progress = self.calc_percent(frag_downloaded_bytes,
                                                   frag_total_bytes)
-                progress = self.calc_percent(state['frag_counter'], total_frags)
+                progress = self.calc_percent(state['frag_index'], total_frags)
                 progress += frag_progress / float(total_frags)

-            eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
-            self.report_progress(progress, format_bytes(estimated_size),
-                                 status.get('speed'), eta)
+                state['eta'] = self.calc_eta(
+                    start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
+                state['speed'] = s.get('speed')
+            self._hook_progress(state)
+
         http_dl.add_progress_hook(frag_progress_hook)

         frags_filenames = []
-        for (seg_i, frag_i) in fragments_list:
+        while fragments_list:
+            seg_i, frag_i = fragments_list.pop(0)
             name = 'Seg%d-Frag%d' % (seg_i, frag_i)
             url = base_url + name
+            if akamai_pv:
+                url += '?' + akamai_pv.strip(';')
             frag_filename = '%s-%s' % (tmpfilename, name)
-            success = http_dl.download(frag_filename, {'url': url})
-            if not success:
-                return False
+            try:
+                success = http_dl.download(frag_filename, {'url': url})
+                if not success:
+                    return False
@@ -295,11 +402,30 @@ class F4mFD(FileDownloader):
                     if box_type == b'mdat':
                         dest_stream.write(box_data)
                         break
-            frags_filenames.append(frag_filename)
+                if live:
+                    os.remove(frag_filename)
+                else:
+                    frags_filenames.append(frag_filename)
+            except (compat_urllib_error.HTTPError, ) as err:
+                if live and (err.code == 404 or err.code == 410):
+                    # We didn't keep up with the live window. Continue
+                    # with the next available fragment.
+                    msg = 'Fragment %d unavailable' % frag_i
+                    self.report_warning(msg)
+                    fragments_list = []
+                else:
+                    raise
+
+            if not fragments_list and live and bootstrap_url:
+                fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
+                total_frags += len(fragments_list)
+                if fragments_list and (fragments_list[0][1] > frag_i + 1):
+                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
+                    self.report_warning(msg)

         dest_stream.close()
-        self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)
+
+        elapsed = time.time() - start
         self.try_rename(tmpfilename, filename)
         for frag_file in frags_filenames:
             os.remove(frag_file)
@@ -310,6 +436,7 @@ class F4mFD(FileDownloader):
             'total_bytes': fsize,
             'filename': filename,
             'status': 'finished',
+            'elapsed': elapsed,
         })

         return True
youtube_dl/downloader/hls.py
@@ -1,8 +1,17 @@
+from __future__ import unicode_literals
+
 import os
+import re
 import subprocess

+from ..postprocessor.ffmpeg import FFmpegPostProcessor
 from .common import FileDownloader
+from ..compat import (
+    compat_urlparse,
+    compat_urllib_request,
+)
 from ..utils import (
+    encodeArgument,
     encodeFilename,
 )

@@ -13,25 +22,21 @@ class HlsFD(FileDownloader):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)

-        args = [
-            '-y', '-i', url, '-f', 'mp4', '-c', 'copy',
-            '-bsf:a', 'aac_adtstoasc',
-            encodeFilename(tmpfilename, for_subprocess=True)]
-
-        for program in ['avconv', 'ffmpeg']:
-            try:
-                subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-                break
-            except (OSError, IOError):
-                pass
-        else:
-            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
-        cmd = [program] + args
-
-        retval = subprocess.call(cmd)
+        ffpp = FFmpegPostProcessor(downloader=self)
+        if not ffpp.available:
+            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            return False
+        ffpp.check_version()
+
+        args = [
+            encodeArgument(opt)
+            for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
+        args.append(encodeFilename(tmpfilename, True))
+
+        retval = subprocess.call(args)
         if retval == 0:
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -41,6 +46,59 @@ class HlsFD(FileDownloader):
             })
             return True
         else:
-            self.to_stderr(u"\n")
-            self.report_error(u'ffmpeg exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('%s exited with code %d' % (ffpp.basename, retval))
             return False
+
+
+class NativeHlsFD(FileDownloader):
+    """ A more limited implementation that does not require ffmpeg """
+
+    def real_download(self, filename, info_dict):
+        url = info_dict['url']
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        self.to_screen(
+            '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
+        data = self.ydl.urlopen(url).read()
+        s = data.decode('utf-8', 'ignore')
+        segment_urls = []
+        for line in s.splitlines():
+            line = line.strip()
+            if line and not line.startswith('#'):
+                segment_url = (
+                    line
+                    if re.match(r'^https?://', line)
+                    else compat_urlparse.urljoin(url, line))
+                segment_urls.append(segment_url)
+
+        is_test = self.params.get('test', False)
+        remaining_bytes = self._TEST_FILE_SIZE if is_test else None
+        byte_counter = 0
+        with open(tmpfilename, 'wb') as outf:
+            for i, segurl in enumerate(segment_urls):
+                self.to_screen(
+                    '[hlsnative] %s: Downloading segment %d / %d' %
+                    (info_dict['id'], i + 1, len(segment_urls)))
+                seg_req = compat_urllib_request.Request(segurl)
+                if remaining_bytes is not None:
+                    seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
+
+                segment = self.ydl.urlopen(seg_req).read()
+                if remaining_bytes is not None:
+                    segment = segment[:remaining_bytes]
+                    remaining_bytes -= len(segment)
+                outf.write(segment)
+                byte_counter += len(segment)
+                if remaining_bytes is not None and remaining_bytes <= 0:
+                    break
+
+        self._hook_progress({
+            'downloaded_bytes': byte_counter,
+            'total_bytes': byte_counter,
+            'filename': filename,
+            'status': 'finished',
+        })
+        self.try_rename(tmpfilename, filename)
+        return True
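A sketch of the manifest handling above (illustrative): given a manifest like

    #EXTM3U
    #EXTINF:10,
    segment0.ts
    #EXTINF:10,
    http://cdn.example.com/segment1.ts

NativeHlsFD keeps the non-comment lines and joins relative entries against the
manifest URL, so a manifest fetched from http://example.com/hls/index.m3u8
yields ['http://example.com/hls/segment0.ts',
'http://cdn.example.com/segment1.ts'].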
youtube_dl/downloader/http.py
@@ -1,21 +1,23 @@
+from __future__ import unicode_literals
+
+import errno
 import os
+import socket
 import time

 from .common import FileDownloader
-from ..utils import (
+from ..compat import (
     compat_urllib_request,
     compat_urllib_error,
+)
+from ..utils import (
     ContentTooShortError,
-
     encodeFilename,
     sanitize_open,
-    format_bytes,
 )


 class HttpFD(FileDownloader):
-    _TEST_FILE_SIZE = 10241
-
     def real_download(self, filename, info_dict):
         url = info_dict['url']
         tmpfilename = self.temp_name(filename)
@@ -23,12 +25,16 @@ class HttpFD(FileDownloader):

         # Do not include the Accept-Encoding header
         headers = {'Youtubedl-no-compression': 'True'}
-        if 'user_agent' in info_dict:
-            headers['Youtubedl-user-agent'] = info_dict['user_agent']
-        if 'http_referer' in info_dict:
-            headers['Referer'] = info_dict['http_referer']
-        basic_request = compat_urllib_request.Request(url, None, headers)
-        request = compat_urllib_request.Request(url, None, headers)
+        add_headers = info_dict.get('http_headers')
+        if add_headers:
+            headers.update(add_headers)
+        data = info_dict.get('http_post_data')
+        http_method = info_dict.get('http_method')
+        basic_request = compat_urllib_request.Request(url, data, headers)
+        request = compat_urllib_request.Request(url, data, headers)
+        if http_method is not None:
+            basic_request.get_method = lambda: http_method
+            request.get_method = lambda: http_method

         is_test = self.params.get('test', False)

@@ -43,7 +49,7 @@ class HttpFD(FileDownloader):

         open_mode = 'wb'
         if resume_len != 0:
-            if self.params.get('continuedl', False):
+            if self.params.get('continuedl', True):
                 self.report_resuming_byte(resume_len)
                 request.add_header('Range', 'bytes=%d-' % resume_len)
                 open_mode = 'ab'
@@ -86,6 +92,8 @@ class HttpFD(FileDownloader):
                     self._hook_progress({
                         'filename': filename,
                         'status': 'finished',
+                        'downloaded_bytes': resume_len,
+                        'total_bytes': resume_len,
                     })
                     return True
                 else:
@@ -94,13 +102,18 @@ class HttpFD(FileDownloader):
                     resume_len = 0
                     open_mode = 'wb'
                     break
+            except socket.error as e:
+                if e.errno != errno.ECONNRESET:
+                    # Connection reset is no problem, just retry
+                    raise
+
             # Retry
             count += 1
             if count <= retries:
                 self.report_retry(count, retries)

         if count > retries:
-            self.report_error(u'giving up after %s retries' % retries)
+            self.report_error('giving up after %s retries' % retries)
             return False

         data_len = data.info().get('Content-length', None)
@@ -118,26 +131,30 @@ class HttpFD(FileDownloader):
         min_data_len = self.params.get("min_filesize", None)
         max_data_len = self.params.get("max_filesize", None)
         if min_data_len is not None and data_len < min_data_len:
-            self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
+            self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
             return False
         if max_data_len is not None and data_len > max_data_len:
-            self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
+            self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
             return False

-        data_len_str = format_bytes(data_len)
         byte_counter = 0 + resume_len
         block_size = self.params.get('buffersize', 1024)
         start = time.time()
+
+        # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
+        now = None  # needed for slow_down() in the first loop run
+        before = start  # start measuring
         while True:
+
             # Download and write
-            before = time.time()
             data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
-            after = time.time()
-            if len(data_block) == 0:
-                break
             byte_counter += len(data_block)

-            # Open file just in time
+            # exit loop when download is finished
+            if len(data_block) == 0:
+                break
+
+            # Open destination file just in time
             if stream is None:
                 try:
                     (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
@@ -145,48 +162,64 @@ class HttpFD(FileDownloader):
                     filename = self.undo_temp_name(tmpfilename)
                     self.report_destination(filename)
                 except (OSError, IOError) as err:
-                    self.report_error(u'unable to open for writing: %s' % str(err))
+                    self.report_error('unable to open for writing: %s' % str(err))
                     return False
+
+            if self.params.get('xattr_set_filesize', False) and data_len is not None:
+                try:
+                    import xattr
+                    xattr.setxattr(tmpfilename, 'user.ytdl.filesize', str(data_len))
+                except(OSError, IOError, ImportError) as err:
+                    self.report_error('unable to set filesize xattr: %s' % str(err))
+
             try:
                 stream.write(data_block)
             except (IOError, OSError) as err:
-                self.to_stderr(u"\n")
-                self.report_error(u'unable to write data: %s' % str(err))
+                self.to_stderr('\n')
+                self.report_error('unable to write data: %s' % str(err))
                 return False
+
+            # Apply rate limit
+            self.slow_down(start, now, byte_counter - resume_len)
+
+            # end measuring of one loop run
+            now = time.time()
+            after = now
+
+            # Adjust block size
             if not self.params.get('noresizebuffer', False):
                 block_size = self.best_block_size(after - before, len(data_block))
+
+            before = after
+
             # Progress message
-            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
+            speed = self.calc_speed(start, now, byte_counter - resume_len)
             if data_len is None:
-                eta = percent = None
+                eta = None
             else:
||||||
percent = self.calc_percent(byte_counter, data_len)
|
|
||||||
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
|
eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
|
||||||
self.report_progress(percent, data_len_str, speed, eta)
|
|
||||||
|
|
||||||
self._hook_progress({
|
self._hook_progress({
|
||||||
|
'status': 'downloading',
|
||||||
'downloaded_bytes': byte_counter,
|
'downloaded_bytes': byte_counter,
|
||||||
'total_bytes': data_len,
|
'total_bytes': data_len,
|
||||||
'tmpfilename': tmpfilename,
|
'tmpfilename': tmpfilename,
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
'status': 'downloading',
|
|
||||||
'eta': eta,
|
'eta': eta,
|
||||||
'speed': speed,
|
'speed': speed,
|
||||||
|
'elapsed': now - start,
|
||||||
})
|
})
|
||||||
|
|
||||||
if is_test and byte_counter == data_len:
|
if is_test and byte_counter == data_len:
|
||||||
break
|
break
|
||||||
|
|
||||||
# Apply rate limit
|
|
||||||
self.slow_down(start, byte_counter - resume_len)
|
|
||||||
|
|
||||||
if stream is None:
|
if stream is None:
|
||||||
self.to_stderr(u"\n")
|
self.to_stderr('\n')
|
||||||
self.report_error(u'Did not get any data blocks')
|
self.report_error('Did not get any data blocks')
|
||||||
return False
|
return False
|
||||||
|
if tmpfilename != '-':
|
||||||
stream.close()
|
stream.close()
|
||||||
self.report_finish(data_len_str, (time.time() - start))
|
|
||||||
if data_len is not None and byte_counter != data_len:
|
if data_len is not None and byte_counter != data_len:
|
||||||
raise ContentTooShortError(byte_counter, int(data_len))
|
raise ContentTooShortError(byte_counter, int(data_len))
|
||||||
self.try_rename(tmpfilename, filename)
|
self.try_rename(tmpfilename, filename)
|
||||||
@@ -200,6 +233,7 @@ class HttpFD(FileDownloader):
|
|||||||
'total_bytes': byte_counter,
|
'total_bytes': byte_counter,
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
'status': 'finished',
|
'status': 'finished',
|
||||||
|
'elapsed': time.time() - start,
|
||||||
})
|
})
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
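The progress-hook payloads above now carry a 'status' key plus 'elapsed' and, on resume, 'downloaded_bytes'/'total_bytes'. A minimal sketch of a consumer, assuming the standard progress_hooks parameter; the hook name and print format are illustrative, not part of the diff:

    # Sketch of a progress hook consuming the fields emitted by _hook_progress
    # above; log_progress is hypothetical, registered via
    # {'progress_hooks': [log_progress]} in the YoutubeDL params.
    def log_progress(d):
        if d['status'] == 'downloading':
            total = d.get('total_bytes')  # may be None without a Content-Length header
            print('%d bytes in %.1fs (total: %s)' % (
                d['downloaded_bytes'], d.get('elapsed', 0),
                total if total is not None else 'unknown'))
        elif d['status'] == 'finished':
            print('done: %s' % d['filename'])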
youtube_dl/downloader/mplayer.py

@@ -1,8 +1,11 @@
+from __future__ import unicode_literals
+
 import os
 import subprocess

 from .common import FileDownloader
 from ..utils import (
+    check_executable,
     encodeFilename,
 )

@@ -13,19 +16,19 @@ class MplayerFD(FileDownloader):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

-        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
+        args = [
+            'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
+            '-dumpstream', '-dumpfile', tmpfilename, url]
        # Check for mplayer first
-        try:
-            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-        except (OSError, IOError):
-            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
+        if not check_executable('mplayer', ['-h']):
+            self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0])
            return False

        # Download using mplayer.
        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
+            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
@@ -35,6 +38,6 @@ class MplayerFD(FileDownloader):
            })
            return True
        else:
-            self.to_stderr(u"\n")
-            self.report_error(u'mplayer exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('mplayer exited with code %d' % retval)
            return False
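check_executable replaces the inline try/except around subprocess.call both here and in rtmp.py below. The real helper lives in youtube_dl/utils.py; the following is only an approximate sketch of its behaviour, not the exact implementation:

    import subprocess

    def check_executable(exe, args=[]):
        # Run the binary with a cheap argument list (e.g. ['-h']); an OSError
        # means it is missing from PATH, so report a falsy result.
        try:
            subprocess.Popen(
                [exe] + args,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        except OSError:
            return False
        return exe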
youtube_dl/downloader/rtmp.py

@@ -7,13 +7,19 @@ import sys
 import time

 from .common import FileDownloader
+from ..compat import compat_str
 from ..utils import (
+    check_executable,
     encodeFilename,
-    format_bytes,
-    compat_str,
+    get_exe_version,
 )


+def rtmpdump_version():
+    return get_exe_version(
+        'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
+
+
 class RtmpFD(FileDownloader):
     def real_download(self, filename, info_dict):
         def run_rtmpdump(args):
@@ -44,23 +50,23 @@ class RtmpFD(FileDownloader):
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    time_now = time.time()
+                    eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
-                    data_len_str = '~' + format_bytes(data_len)
-                    self.report_progress(percent, data_len_str, speed, eta)
-                    cursor_in_new_line = False
                    self._hook_progress({
+                        'status': 'downloading',
                        'downloaded_bytes': downloaded_data_len,
-                        'total_bytes': data_len,
+                        'total_bytes_estimate': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
-                        'status': 'downloading',
                        'eta': eta,
+                        'elapsed': time_now - start,
                        'speed': speed,
                    })
+                    cursor_in_new_line = False
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
@@ -68,15 +74,15 @@ class RtmpFD(FileDownloader):
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
-                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
-                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
+                            'elapsed': time_now - start,
                            'speed': speed,
                        })
+                        cursor_in_new_line = False
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen('')
@@ -97,22 +103,25 @@ class RtmpFD(FileDownloader):
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)
        protocol = info_dict.get('rtmp_protocol', None)
+        real_time = info_dict.get('rtmp_real_time', False)
+        no_resume = info_dict.get('no_resume', False)
+        continue_dl = info_dict.get('continuedl', True)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first
-        try:
-            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-        except (OSError, IOError):
+        if not check_executable('rtmpdump', ['-h']):
            self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrumpted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
-        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
+        basic_args = [
+            'rtmpdump', '--verbose', '-r', url,
+            '-o', encodeFilename(tmpfilename, True)]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
@@ -136,7 +145,14 @@ class RtmpFD(FileDownloader):
            basic_args += ['--conn', conn]
        if protocol is not None:
            basic_args += ['--protocol', protocol]
-        args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)]
+        if real_time:
+            basic_args += ['--realtime']
+
+        args = basic_args
+        if not no_resume and continue_dl and not live:
+            args += ['--resume']
+        if not live and continue_dl:
+            args += ['--skip', '1']

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
@@ -147,19 +163,7 @@ class RtmpFD(FileDownloader):
        else:
            subprocess_encoding = None

-        if self.params.get('verbose', False):
-            if subprocess_encoding:
-                str_args = [
-                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
-                    for a in args]
-            else:
-                str_args = args
-            try:
-                import pipes
-                shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
-            except ImportError:
-                shell_quote = repr
-            self.to_screen('[debug] rtmpdump command line: ' + shell_quote(str_args))
+        self._debug_cmd(args, subprocess_encoding, exe='rtmpdump')

        RD_SUCCESS = 0
        RD_FAILED = 1
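The one-line table lookup for --resume/--skip is unrolled into explicit conditions above. A worked example of the new logic, with made-up input values and URL:

    # Walk-through of the unrolled flag logic; all values here are examples.
    live, no_resume, continue_dl, real_time = False, False, True, False
    basic_args = ['rtmpdump', '--verbose', '-r', 'rtmp://example.invalid/stream',
                  '-o', 'out.flv']

    if real_time:
        basic_args += ['--realtime']

    args = basic_args
    if not no_resume and continue_dl and not live:
        args += ['--resume']
    if not live and continue_dl:
        args += ['--skip', '1']

    # args now ends with ['--resume', '--skip', '1'] -- the same result the old
    # [[], ['--resume', '--skip', '1']][...] indexing produced for this case,
    # but live, no-resume and real-time downloads are handled independently.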
youtube_dl/extractor/__init__.py

@@ -1,13 +1,24 @@
+from __future__ import unicode_literals
+
+from .abc import ABCIE
+from .abc7news import Abc7NewsIE
 from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
+from .adobetv import AdobeTVIE
+from .adultswim import AdultSwimIE
+from .aftenposten import AftenpostenIE
 from .aftonbladet import AftonbladetIE
+from .airmozilla import AirMozillaIE
+from .aljazeera import AlJazeeraIE
+from .alphaporno import AlphaPornoIE
 from .anitube import AnitubeIE
+from .anysex import AnySexIE
 from .aol import AolIE
 from .allocine import AllocineIE
 from .aparat import AparatIE
 from .appletrailers import AppleTrailersIE
 from .archiveorg import ArchiveOrgIE
-from .ard import ARDIE
+from .ard import ARDIE, ARDMediathekIE
 from .arte import (
     ArteTvIE,
     ArteTVPlus7IE,
@@ -17,72 +28,124 @@ from .arte import (
     ArteTVDDCIE,
     ArteTVEmbedIE,
 )
-from .auengine import AUEngineIE
+from .atresplayer import AtresPlayerIE
+from .atttechchannel import ATTTechChannelIE
+from .audiomack import AudiomackIE, AudiomackAlbumIE
+from .azubu import AzubuIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbccouk import BBCCoUkIE
+from .beeg import BeegIE
+from .behindkink import BehindKinkIE
+from .beatportpro import BeatportProIE
+from .bet import BetIE
+from .bild import BildIE
 from .bilibili import BiliBiliIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
+from .bpb import BpbIE
 from .br import BRIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
+from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
+from .camdemy import (
+    CamdemyIE,
+    CamdemyFolderIE
+)
 from .canal13cl import Canal13clIE
 from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE
 from .cbs import CBSIE
 from .cbsnews import CBSNewsIE
+from .cbssports import CBSSportsIE
+from .ccc import CCCIE
 from .ceskatelevize import CeskaTelevizeIE
 from .channel9 import Channel9IE
 from .chilloutzone import ChilloutzoneIE
-from .cinemassacre import CinemassacreIE
+from .chirbit import (
+    ChirbitIE,
+    ChirbitProfileIE,
+)
+from .cinchcast import CinchcastIE
 from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
+from .cloudy import CloudyIE
 from .clubic import ClubicIE
 from .cmt import CMTIE
 from .cnet import CNETIE
 from .cnn import (
     CNNIE,
     CNNBlogsIE,
+    CNNArticleIE,
 )
 from .collegehumor import CollegeHumorIE
+from .collegerama import CollegeRamaIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
+from .comcarcoff import ComCarCoffIE
+from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
 from .condenast import CondeNastIE
+from .cracked import CrackedIE
 from .criterion import CriterionIE
-from .crunchyroll import CrunchyrollIE
+from .crunchyroll import (
+    CrunchyrollIE,
+    CrunchyrollShowPlaylistIE
+)
 from .cspan import CSpanIE
-from .d8 import D8IE
+from .ctsnews import CtsNewsIE
 from .dailymotion import (
     DailymotionIE,
     DailymotionPlaylistIE,
     DailymotionUserIE,
 )
 from .daum import DaumIE
+from .dbtv import DBTVIE
+from .dctp import DctpTvIE
+from .deezer import DeezerPlaylistIE
+from .dfb import DFBIE
+from .dhm import DHMIE
 from .dotsub import DotsubIE
+from .douyutv import DouyuTVIE
 from .dreisat import DreiSatIE
+from .drbonanza import DRBonanzaIE
+from .drtuber import DrTuberIE
 from .drtv import DRTVIE
+from .dvtv import DVTVIE
+from .dump import DumpIE
+from .dumpert import DumpertIE
 from .defense import DefenseGouvFrIE
 from .discovery import DiscoveryIE
 from .divxstage import DivxStageIE
 from .dropbox import DropboxIE
+from .eagleplatform import EaglePlatformIE
 from .ebaumsworld import EbaumsWorldIE
+from .echomsk import EchoMskIE
 from .ehow import EHowIE
 from .eighttracks import EightTracksIE
+from .einthusan import EinthusanIE
 from .eitb import EitbIE
+from .ellentv import (
+    EllenTVIE,
+    EllenTVClipsIE,
+)
 from .elpais import ElPaisIE
-from .empflix import EmpflixIE
+from .embedly import EmbedlyIE
+from .empflix import EMPFlixIE
 from .engadget import EngadgetIE
+from .eporner import EpornerIE
+from .eroprofile import EroProfileIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
+from .expotv import ExpoTVIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE
 from .fc2 import FC2IE
+from .firedrive import FiredriveIE
 from .firstpost import FirstpostIE
 from .firsttv import FirstTVIE
 from .fivemin import FiveMinIE
@@ -91,7 +154,11 @@ from .fktv import (
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .folketinget import FolketingetIE
+from .footyroom import FootyRoomIE
 from .fourtube import FourTubeIE
+from .foxgay import FoxgayIE
+from .foxnews import FoxNewsIE
 from .franceculture import FranceCultureIE
 from .franceinter import FranceInterIE
 from .francetv import (
@@ -103,22 +170,46 @@ from .francetv import (
 )
 from .freesound import FreesoundIE
 from .freespeech import FreespeechIE
+from .freevideo import FreeVideoIE
 from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
-from .gameone import GameOneIE
+from .gameone import (
+    GameOneIE,
+    GameOnePlaylistIE,
+)
 from .gamespot import GameSpotIE
+from .gamestar import GameStarIE
 from .gametrailers import GametrailersIE
+from .gazeta import GazetaIE
 from .gdcvault import GDCVaultIE
 from .generic import GenericIE
+from .giantbomb import GiantBombIE
+from .giga import GigaIE
+from .glide import GlideIE
+from .globo import GloboIE
+from .godtube import GodTubeIE
+from .goldenmoustache import GoldenMoustacheIE
+from .golem import GolemIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .gorillavid import GorillaVidIE
 from .goshgay import GoshgayIE
+from .grooveshark import GroovesharkIE
+from .groupon import GrouponIE
 from .hark import HarkIE
+from .hearthisat import HearThisAtIE
+from .heise import HeiseIE
+from .hellporno import HellPornoIE
 from .helsinki import HelsinkiIE
 from .hentaistigma import HentaiStigmaIE
+from .historicfilms import HistoricFilmsIE
+from .history import HistoryIE
+from .hitbox import HitboxIE, HitboxLiveIE
+from .hornbunny import HornBunnyIE
+from .hostingbulk import HostingBulkIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
+from .howstuffworks import HowStuffWorksIE
 from .huffpost import HuffPostIE
 from .hypem import HypemIE
 from .iconosquare import IconosquareIE
@@ -127,6 +218,7 @@ from .imdb import (
     ImdbIE,
     ImdbListIE
 )
+from .imgur import ImgurIE
 from .ina import InaIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE, InstagramUserIE
@@ -136,19 +228,31 @@ from .ivi import (
     IviIE,
     IviCompilationIE
 )
+from .izlesene import IzleseneIE
 from .jadorecettepub import JadoreCettePubIE
 from .jeuxvideo import JeuxVideoIE
+from .jove import JoveIE
 from .jukebox import JukeboxIE
-from .justintv import JustinTVIE
 from .jpopsukitv import JpopsukiIE
+from .kaltura import KalturaIE
+from .kanalplay import KanalPlayIE
 from .kankan import KankanIE
+from .karaoketv import KaraoketvIE
 from .keezmovies import KeezMoviesIE
 from .khanacademy import KhanAcademyIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .kontrtube import KontrTubeIE
+from .krasview import KrasViewIE
 from .ku6 import Ku6IE
 from .la7 import LA7IE
+from .laola1tv import Laola1TvIE
+from .letv import (
+    LetvIE,
+    LetvTvIE,
+    LetvPlaylistIE
+)
+from .libsyn import LibsynIE
 from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
 from .livestream import (
@@ -156,6 +260,8 @@ from .livestream import (
     LivestreamOriginalIE,
     LivestreamShortenerIE,
 )
+from .lnkgo import LnkGoIE
+from .lrt import LRTIE
 from .lynda import (
     LyndaIE,
     LyndaCourseIE
@@ -167,14 +273,24 @@ from .malemotion import MalemotionIE
 from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
+from .mgoon import MgoonIE
+from .minhateca import MinhatecaIE
+from .ministrygrid import MinistryGridIE
+from .miomio import MioMioIE
 from .mit import TechTVMITIE, MITIE, OCWMITIE
+from .mitele import MiTeleIE
 from .mixcloud import MixcloudIE
+from .mlb import MLBIE
 from .mpora import MporaIE
+from .moevideo import MoeVideoIE
 from .mofosex import MofosexIE
+from .mojvideo import MojvideoIE
+from .moniker import MonikerIE
 from .mooshare import MooshareIE
 from .morningstar import MorningstarIE
 from .motherless import MotherlessIE
 from .motorsport import MotorsportIE
+from .movieclips import MovieClipsIE
 from .moviezine import MoviezineIE
 from .movshare import MovShareIE
 from .mtv import (
@@ -182,73 +298,149 @@ from .mtv import (
     MTVServicesEmbeddedIE,
     MTVIggyIE,
 )
+from .muenchentv import MuenchenTVIE
 from .musicplayon import MusicPlayOnIE
+from .musicvault import MusicVaultIE
 from .muzu import MuzuTVIE
-from .myspace import MySpaceIE
+from .myspace import MySpaceIE, MySpaceAlbumIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
+from .myvidster import MyVidsterIE
+from .nationalgeographic import NationalGeographicIE
 from .naver import NaverIE
 from .nba import NBAIE
 from .nbc import (
     NBCIE,
     NBCNewsIE,
+    NBCSportsIE,
+    NBCSportsVPlayerIE,
 )
 from .ndr import NDRIE
 from .ndtv import NDTVIE
+from .netzkino import NetzkinoIE
+from .nerdcubed import NerdCubedFeedIE
+from .nerdist import NerdistIE
 from .newgrounds import NewgroundsIE
 from .newstube import NewstubeIE
+from .nextmedia import (
+    NextMediaIE,
+    NextMediaActionNewsIE,
+    AppleDailyRealtimeNewsIE,
+    AppleDailyAnimationNewsIE
+)
 from .nfb import NFBIE
-from .nhl import NHLIE, NHLVideocenterIE
-from .niconico import NiconicoIE
+from .nfl import NFLIE
+from .nhl import (
+    NHLIE,
+    NHLNewsIE,
+    NHLVideocenterIE,
+)
+from .niconico import NiconicoIE, NiconicoPlaylistIE
 from .ninegag import NineGagIE
 from .noco import NocoIE
 from .normalboots import NormalbootsIE
+from .nosvideo import NosVideoIE
 from .novamov import NovaMovIE
 from .nowness import NownessIE
 from .nowvideo import NowVideoIE
-from .npo import NPOIE
+from .npo import (
+    NPOIE,
+    NPOLiveIE,
+    NPORadioIE,
+    NPORadioFragmentIE,
+    TegenlichtVproIE,
+)
 from .nrk import (
     NRKIE,
+    NRKPlaylistIE,
     NRKTVIE,
 )
-from .ntv import NTVIE
+from .ntvde import NTVDeIE
+from .ntvru import NTVRuIE
 from .nytimes import NYTimesIE
 from .nuvid import NuvidIE
-from .oe1 import OE1IE
+from .odnoklassniki import OdnoklassnikiIE
+from .oktoberfesttv import OktoberfestTVIE
 from .ooyala import OoyalaIE
-from .orf import ORFIE
+from .openfilm import OpenFilmIE
+from .orf import (
+    ORFTVthekIE,
+    ORFOE1IE,
+    ORFFM4IE,
+    ORFIPTVIE,
+)
 from .parliamentliveuk import ParliamentLiveUKIE
+from .patreon import PatreonIE
 from .pbs import PBSIE
+from .phoenix import PhoenixIE
 from .photobucket import PhotobucketIE
+from .planetaplay import PlanetaPlayIE
+from .pladform import PladformIE
+from .played import PlayedIE
+from .playfm import PlayFMIE
 from .playvid import PlayvidIE
+from .playwire import PlaywireIE
 from .podomatic import PodomaticIE
 from .pornhd import PornHdIE
-from .pornhub import PornHubIE
+from .pornhub import (
+    PornHubIE,
+    PornHubPlaylistIE,
+)
 from .pornotube import PornotubeIE
+from .pornoxo import PornoXOIE
+from .primesharetv import PrimeShareTVIE
+from .promptfile import PromptFileIE
 from .prosiebensat1 import ProSiebenSat1IE
+from .puls4 import Puls4IE
 from .pyvideo import PyvideoIE
+from .quickvid import QuickVidIE
+from .r7 import R7IE
+from .radiode import RadioDeIE
+from .radiobremen import RadioBremenIE
 from .radiofrance import RadioFranceIE
 from .rai import RaiIE
 from .rbmaradio import RBMARadioIE
 from .redtube import RedTubeIE
+from .restudy import RestudyIE
+from .reverbnation import ReverbNationIE
 from .ringtv import RingTVIE
 from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
 from .rtbf import RTBFIE
+from .rte import RteIE
+from .rtlnl import RtlNlIE
 from .rtlnow import RTLnowIE
+from .rtl2 import RTL2IE
+from .rtp import RTPIE
 from .rts import RTSIE
-from .rtve import RTVEALaCartaIE
+from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
+from .ruhd import RUHDIE
 from .rutube import (
     RutubeIE,
     RutubeChannelIE,
+    RutubeEmbedIE,
     RutubeMovieIE,
     RutubePersonIE,
 )
 from .rutv import RUTVIE
+from .sandia import SandiaIE
+from .safari import (
+    SafariIE,
+    SafariCourseIE,
+)
+from .sapo import SapoIE
 from .savefrom import SaveFromIE
+from .sbs import SBSIE
 from .scivee import SciVeeIE
+from .screencast import ScreencastIE
+from .screencastomatic import ScreencastOMaticIE
+from .screenwavemedia import CinemassacreIE, ScreenwaveMediaIE, TeamFourIE
 from .servingsys import ServingSysIE
+from .sexu import SexuIE
+from .sexykarma import SexyKarmaIE
+from .shared import SharedIE
+from .sharesix import ShareSixIE
 from .sina import SinaIE
 from .slideshare import SlideshareIE
 from .slutload import SlutloadIE
@@ -258,6 +450,8 @@ from .smotri import (
     SmotriUserIE,
     SmotriBroadcastIE,
 )
+from .snotr import SnotrIE
+from .sockshare import SockshareIE
 from .sohu import SohuIE
 from .soundcloud import (
     SoundcloudIE,
@@ -265,24 +459,37 @@ from .soundcloud import (
     SoundcloudUserIE,
     SoundcloudPlaylistIE
 )
-from .soundgasm import SoundgasmIE
-from .southparkstudios import (
-    SouthParkStudiosIE,
+from .soundgasm import (
+    SoundgasmIE,
+    SoundgasmProfileIE
+)
+from .southpark import (
+    SouthParkIE,
     SouthparkDeIE,
 )
 from .space import SpaceIE
 from .spankwire import SpankwireIE
-from .spiegel import SpiegelIE
+from .spiegel import SpiegelIE, SpiegelArticleIE
 from .spiegeltv import SpiegeltvIE
 from .spike import SpikeIE
+from .sport5 import Sport5IE
+from .sportbox import SportBoxIE
+from .sportdeutschland import SportDeutschlandIE
+from .srmediathek import SRMediathekIE
+from .ssa import SSAIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .streetvoice import StreetVoiceIE
+from .sunporno import SunPornoIE
+from .svtplay import SVTPlayIE
 from .swrmediathek import SWRMediathekIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .tagesschau import TagesschauIE
+from .tapely import TapelyIE
+from .tass import TassIE
 from .teachertube import (
     TeacherTubeIE,
     TeacherTubeUserIE,
@@ -291,13 +498,26 @@ from .teachingchannel import TeachingChannelIE
 from .teamcoco import TeamcocoIE
 from .techtalks import TechTalksIE
 from .ted import TEDIE
+from .telebruxelles import TeleBruxellesIE
+from .telecinco import TelecincoIE
+from .telemb import TeleMBIE
+from .teletask import TeleTaskIE
 from .tenplay import TenPlayIE
 from .testurl import TestURLIE
+from .testtube import TestTubeIE
 from .tf1 import TF1IE
+from .theonion import TheOnionIE
 from .theplatform import ThePlatformIE
+from .thesixtyone import TheSixtyOneIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
 from .tlc import TlcIE, TlcDeIE
+from .tmz import TMZIE
+from .tnaflix import TNAFlixIE
+from .thvideo import (
+    THVideoIE,
+    THVideoPlaylistIE
+)
 from .toutv import TouTvIE
 from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
@@ -306,37 +526,68 @@ from .trutube import TruTubeIE
 from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
+from .tunein import TuneInIE
+from .turbo import TurboIE
 from .tutv import TutvIE
+from .tv4 import TV4IE
 from .tvigle import TvigleIE
-from .tvp import TvpIE
+from .tvp import TvpIE, TvpSeriesIE
+from .tvplay import TVPlayIE
+from .tweakers import TweakersIE
+from .twentyfourvideo import TwentyFourVideoIE
+from .twentytwotracks import (
+    TwentyTwoTracksIE,
+    TwentyTwoTracksGenreIE
+)
+from .twitch import (
+    TwitchVideoIE,
+    TwitchChapterIE,
+    TwitchVodIE,
+    TwitchProfileIE,
+    TwitchPastBroadcastsIE,
+    TwitchBookmarksIE,
+    TwitchStreamIE,
+)
+from .ubu import UbuIE
 from .udemy import (
     UdemyIE,
     UdemyCourseIE
 )
+from .ultimedia import UltimediaIE
 from .unistra import UnistraIE
 from .urort import UrortIE
 from .ustream import UstreamIE, UstreamChannelIE
+from .varzesh3 import Varzesh3IE
 from .vbox7 import Vbox7IE
 from .veehd import VeeHDIE
 from .veoh import VeohIE
+from .vessel import VesselIE
 from .vesti import VestiIE
 from .vevo import VevoIE
+from .vgtv import VGTVIE
 from .vh1 import VH1IE
+from .vice import ViceIE
 from .viddler import ViddlerIE
 from .videobam import VideoBamIE
 from .videodetective import VideoDetectiveIE
 from .videolecturesnet import VideoLecturesNetIE
 from .videofyme import VideofyMeIE
+from .videomega import VideoMegaIE
 from .videopremium import VideoPremiumIE
 from .videott import VideoTtIE
 from .videoweed import VideoWeedIE
+from .vidme import VidmeIE
+from .vidzi import VidziIE
+from .vier import VierIE, VierVideosIE
+from .viewster import ViewsterIE
 from .vimeo import (
     VimeoIE,
-    VimeoChannelIE,
-    VimeoUserIE,
     VimeoAlbumIE,
+    VimeoChannelIE,
     VimeoGroupsIE,
+    VimeoLikesIE,
     VimeoReviewIE,
+    VimeoUserIE,
     VimeoWatchLaterIE,
 )
 from .vimple import VimpleIE
@@ -345,36 +596,57 @@ from .vine import (
     VineUserIE,
 )
 from .viki import VikiIE
-from .vk import VKIE
+from .vk import (
+    VKIE,
+    VKUserVideosIE,
+)
 from .vodlocker import VodlockerIE
+from .vporn import VpornIE
+from .vrt import VRTIE
 from .vube import VubeIE
 from .vuclip import VuClipIE
 from .vulture import VultureIE
+from .walla import WallaIE
 from .washingtonpost import WashingtonPostIE
 from .wat import WatIE
+from .wayofthemaster import WayOfTheMasterIE
 from .wdr import (
     WDRIE,
     WDRMobileIE,
     WDRMausIE,
 )
+from .webofstories import WebOfStoriesIE
 from .weibo import WeiboIE
 from .wimp import WimpIE
 from .wistia import WistiaIE
 from .worldstarhiphop import WorldStarHipHopIE
 from .wrzuta import WrzutaIE
+from .wsj import WSJIE
 from .xbef import XBefIE
+from .xboxclips import XboxClipsIE
 from .xhamster import XHamsterIE
+from .xminus import XMinusIE
 from .xnxx import XNXXIE
 from .xvideos import XVideosIE
 from .xtube import XTubeUserIE, XTubeIE
+from .xuite import XuiteIE
+from .xxxymovies import XXXYMoviesIE
 from .yahoo import (
     YahooIE,
-    YahooNewsIE,
     YahooSearchIE,
 )
+from .yam import YamIE
+from .yandexmusic import (
+    YandexMusicTrackIE,
+    YandexMusicAlbumIE,
+    YandexMusicPlaylistIE,
+)
+from .yesjapan import YesJapanIE
+from .ynet import YnetIE
 from .youjizz import YouJizzIE
 from .youku import YoukuIE
 from .youporn import YouPornIE
+from .yourupload import YourUploadIE
 from .youtube import (
     YoutubeIE,
     YoutubeChannelIE,
@@ -387,13 +659,17 @@ from .youtube import (
     YoutubeSearchURLIE,
     YoutubeShowIE,
     YoutubeSubscriptionsIE,
-    YoutubeTopListIE,
+    YoutubeTruncatedIDIE,
     YoutubeTruncatedURLIE,
     YoutubeUserIE,
     YoutubeWatchLaterIE,
 )
-from .zdf import ZDFIE
+from .zapiks import ZapiksIE
+from .zdf import ZDFIE, ZDFChannelIE
+from .zingmp3 import (
+    ZingMp3SongIE,
+    ZingMp3AlbumIE,
+)

 _ALL_CLASSES = [
     klass
@@ -410,6 +686,17 @@ def gen_extractors():
     return [klass() for klass in _ALL_CLASSES]


+def list_extractors(age_limit):
+    """
+    Return a list of extractors that are suitable for the given age,
+    sorted by extractor ID.
+    """
+
+    return sorted(
+        filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
+        key=lambda ie: ie.IE_NAME.lower())
+
+
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
     return globals()[ie_name + 'IE']
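A short usage sketch for the new list_extractors() helper; the import path follows from this file being youtube_dl/extractor/__init__.py, and the age limit of 18 is an arbitrary example:

    from youtube_dl.extractor import list_extractors

    for ie in list_extractors(18):
        print(ie.IE_NAME)  # extractor IDs, sorted case-insensitively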
youtube_dl/extractor/abc.py (new file, 47 lines)

@@ -0,0 +1,47 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+
+
+class ABCIE(InfoExtractor):
+    IE_NAME = 'abc.net.au'
+    _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
+        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
+        'info_dict': {
+            'id': '5868334',
+            'ext': 'mp4',
+            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
+            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        urls_info_json = self._search_regex(
+            r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls',
+            flags=re.DOTALL)
+        urls_info = json.loads(urls_info_json.replace('\'', '"'))
+        formats = [{
+            'url': url_info['url'],
+            'width': int(url_info['width']),
+            'height': int(url_info['height']),
+            'tbr': int(url_info['bitrate']),
+            'filesize': int(url_info['filesize']),
+        } for url_info in urls_info]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': self._og_search_title(webpage),
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
youtube_dl/extractor/abc7news.py (new file, 68 lines)

@@ -0,0 +1,68 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import parse_iso8601
+
+
+class Abc7NewsIE(InfoExtractor):
+    _VALID_URL = r'https?://abc7news\.com(?:/[^/]+/(?P<display_id>[^/]+))?/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://abc7news.com/entertainment/east-bay-museum-celebrates-vintage-synthesizers/472581/',
+            'info_dict': {
+                'id': '472581',
+                'display_id': 'east-bay-museum-celebrates-vintage-synthesizers',
+                'ext': 'mp4',
+                'title': 'East Bay museum celebrates history of synthesized music',
+                'description': 'md5:a4f10fb2f2a02565c1749d4adbab4b10',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'timestamp': 1421123075,
+                'upload_date': '20150113',
+                'uploader': 'Jonathan Bloom',
+            },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            },
+        },
+        {
+            'url': 'http://abc7news.com/472581',
+            'only_matching': True,
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        display_id = mobj.group('display_id') or video_id
+
+        webpage = self._download_webpage(url, display_id)
+
+        m3u8 = self._html_search_meta(
+            'contentURL', webpage, 'm3u8 url', fatal=True)
+
+        formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
+        self._sort_formats(formats)
+
+        title = self._og_search_title(webpage).strip()
+        description = self._og_search_description(webpage).strip()
+        thumbnail = self._og_search_thumbnail(webpage)
+        timestamp = parse_iso8601(self._search_regex(
+            r'<div class="meta">\s*<time class="timeago" datetime="([^"]+)">',
+            webpage, 'upload date', fatal=False))
+        uploader = self._search_regex(
+            r'rel="author">([^<]+)</a>',
+            webpage, 'uploader', default=None)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'formats': formats,
+        }
youtube_dl/extractor/academicearth.py
@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -7,17 +8,25 @@ from .common import InfoExtractor
 class AcademicEarthCourseIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
     IE_NAME = 'AcademicEarth:Course'
+    _TEST = {
+        'url': 'http://academicearth.org/playlists/laws-of-nature/',
+        'info_dict': {
+            'id': 'laws-of-nature',
+            'title': 'Laws of Nature',
+            'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
+        },
+        'playlist_count': 4,
+    }
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        playlist_id = m.group('id')
+        playlist_id = self._match_id(url)
 
         webpage = self._download_webpage(url, playlist_id)
         title = self._html_search_regex(
-            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
         description = self._html_search_regex(
             r'<p class="excerpt"[^>]*?>(.*?)</p>',
-            webpage, u'description', fatal=False)
+            webpage, 'description', fatal=False)
         urls = re.findall(
             r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
             webpage)
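This refactor (and the AddAnime one below) replaces the hand-rolled re.match(self._VALID_URL, url) / m.group('id') pair with the _match_id helper on InfoExtractor. A simplified sketch of what that helper does — the real implementation lives in youtube_dl/extractor/common.py, and this paraphrase only shows the idea — which is also why _VALID_URL must name its capture group 'id':

    import re

    class InfoExtractor(object):
        _VALID_URL = None  # each extractor overrides this pattern

        @classmethod
        def _match_id(cls, url):
            # Match the extractor's URL pattern and return the mandatory
            # 'id' group; extractors that used another group name (e.g.
            # 'video_id') must rename it, as in the AddAnime diff below.
            m = re.match(cls._VALID_URL, url)
            assert m
            return m.group('id')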
youtube_dl/extractor/addanime.py
@@ -3,19 +3,19 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_HTTPError,
     compat_str,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
+)
+from ..utils import (
     ExtractorError,
 )
 
 
 class AddAnimeIE(InfoExtractor):
-    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
+    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
     _TEST = {
         'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
         'md5': '72954ea10bc979ab5e2eb288b21425a0',
@@ -28,9 +28,9 @@ class AddAnimeIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
+        video_id = self._match_id(url)
+
         try:
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
             if not isinstance(ee.cause, compat_HTTPError) or \
@@ -48,7 +48,7 @@ class AddAnimeIE(InfoExtractor):
             r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
             redir_webpage)
         if av is None:
-            raise ExtractorError(u'Cannot find redirect math task')
+            raise ExtractorError('Cannot find redirect math task')
         av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
 
         parsed_url = compat_urllib_parse_urlparse(url)
Some files were not shown because too many files have changed in this diff.